repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
โŒ€
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
RitchieFu/scrambly-boi-py
https://github.com/RitchieFu/scrambly-boi-py
69f900392240b0a71932b9bd205e7d90fb192f70
c029913b3ff7adbe8be977bd517b5383a3f560b5
13d7c5ff0c04bdf937da4071e3120797cea407c5
refs/heads/master
2023-04-04T13:14:19.883076
2021-04-06T22:34:34
2021-04-06T22:34:34
329,806,029
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.39131927490234375, "alphanum_fraction": 0.42991936206817627, "avg_line_length": 19.528169631958008, "blob_id": "690b8feaecabd34927d0cb44b8e65d7c445a0a8c", "content_id": "cc700c7a56bf4ac21c4e58bb7377109080a6a10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5829, "license_type": "no_license", "max_line_length": 172, "num_lines": 284, "path": "/scrambleupdate.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image, ImageDraw\nimport random\n\ndef scrambleimage(layers, scramble):\n global faceU\n global faceD\n global faceR\n global faceL\n global faceF\n global faceB\n\n faceU = np.repeat(\"W\", layers**2).reshape(layers, layers)\n faceD = np.repeat(\"Y\", layers**2).reshape(layers, layers)\n faceR = np.repeat(\"R\", layers**2).reshape(layers, layers)\n faceL = np.repeat(\"O\", layers**2).reshape(layers, layers)\n faceF = np.repeat(\"G\", layers**2).reshape(layers, layers)\n faceB = np.repeat(\"B\", layers**2).reshape(layers, layers)\n\n def xAxis(slice: int = layers - 1):\n inverse = layers - slice - 1\n faceU[:, slice], faceF[:, slice], faceD[:, slice], faceB[:, inverse] = faceF[:, slice], faceD[:, slice], faceB[:, inverse][::-1], faceU[:, slice][::-1].copy()\n\n def yAxis(slice: int = 0):\n faceF[[slice]], faceR[[slice]], faceB[[slice]], faceL[[slice]] = faceR[[slice]], faceB[[slice]], faceL[[slice]], faceF[[slice]]\n \n def zAxis(slice: int = layers - 1):\n inverse = layers - slice - 1\n faceU[[slice]], faceR[:, inverse], faceD[[inverse]], faceL[:, slice] = faceL[:, slice][::-1], faceU[[slice]], faceR[:, inverse][::-1].copy(), faceD[[inverse]][::-1]\n\n def R():\n global faceR\n xAxis()\n faceR = np.rot90(faceR, 3)\n\n def Rp():\n R(), R(), R()\n\n def R2():\n R(), R()\n\n def L():\n Lp(), Lp(), Lp()\n\n def Lp():\n global faceL\n xAxis(0)\n faceL = np.rot90(faceL)\n\n def L2():\n Lp(), Lp()\n\n def U():\n global 
faceU\n yAxis()\n faceU = np.rot90(faceU, 3)\n\n def Up():\n U(), U(), U()\n\n def U2():\n U(), U()\n\n def D():\n Dp(), Dp(), Dp()\n\n def Dp():\n global faceD\n yAxis(layers-1)\n faceD = np.rot90(faceD)\n\n def D2():\n Dp(), Dp()\n\n def F():\n global faceF\n zAxis()\n faceF = np.rot90(faceF, 3)\n\n def Fp():\n F(), F(), F()\n\n def F2():\n F(), F()\n\n def B():\n Bp(), Bp(), Bp()\n\n def Bp():\n global faceB\n zAxis(0)\n faceB = np.rot90(faceB)\n\n def B2():\n Bp(), Bp()\n\n # ----------\n\n def Rw():\n R(), xAxis(layers-2)\n\n def Rwp():\n Rw(), Rw(), Rw()\n\n def Rw2():\n Rw(), Rw()\n\n def Lw():\n Lwp(), Lwp(), Lwp()\n\n def Lwp():\n Lp(), xAxis(1)\n\n def Lw2():\n Lwp(), Lwp()\n\n def Uw():\n U(), yAxis(1)\n\n def Uwp():\n Uw(), Uw(), Uw()\n\n def Uw2():\n Uw(), Uw()\n\n def Dw():\n Dwp(), Dwp(), Dwp()\n\n def Dwp():\n Dp(), yAxis(layers-2)\n\n def Dw2():\n Dwp(), Dwp()\n\n def Fw():\n F(), zAxis(layers-2)\n\n def Fwp():\n Fw(), Fw(), Fw()\n\n def Fw2():\n Fw(), Fw()\n\n def Bw():\n Bwp(), Bwp(), Bwp()\n\n def Bwp():\n Bp(), zAxis(1)\n\n def Bw2():\n Bwp(), Bwp()\n\n # ------\n\n def tRw():\n Rw(), xAxis(layers-3)\n\n def tRwp():\n tRw(), tRw(), tRw()\n\n def tRw2():\n tRw(), tRw()\n\n def tLw():\n tLwp(), tLwp(), tLwp()\n\n def tLwp():\n Lwp(), xAxis(2)\n\n def tLw2():\n tLwp(), tLwp()\n\n def tUw():\n Uw(), yAxis(2)\n\n def tUwp():\n tUw(), tUw(), tUw()\n\n def tUw2():\n tUw(), tUw()\n\n def tDw():\n tDwp(), tDwp(), tDwp()\n\n def tDwp():\n Dwp(), yAxis(layers-3)\n\n def tDw2():\n tDwp(), tDwp()\n\n def tFw():\n Fw(), zAxis(layers-3)\n\n def tFwp():\n tFw(), tFw(), tFw()\n\n def tFw2():\n tFw(), tFw()\n\n def tBw():\n tBwp(), tBwp(), tBwp()\n\n def tBwp():\n Bwp(), zAxis(2)\n\n def tBw2():\n tBwp(), tBwp()\n\n # ------\n\n moveMap = {\n 'U': U, \"U'\": Up, 'U2': U2, 'U2\\'': U2,\n 'D': D, \"D'\": Dp, 'D2': D2, 'D2\\'': D2,\n 'R': R, \"R'\": Rp, 'R2': R2, 'R2\\'': R2,\n 'L': L, \"L'\": Lp, 'L2': L2, 'L2\\'': L2,\n 'F': F, \"F'\": Fp, 'F2': F2, 'F2\\'': 
F2,\n 'B': B, \"B'\": Bp, 'B2': B2, 'B2\\'': B2,\n\n \"Uw\":Uw, \"Uw'\":Uwp, \"Uw2\":Uw2, \n \"Dw\":Dw, \"Dw'\":Dwp, \"Dw2\":Dw2, \n \"Rw\":Rw, \"Rw'\":Rwp, \"Rw2\":Rw2, \n \"Lw\":Lw, \"Lw'\":Lwp, \"Lw2\":Lw2, \n \"Fw\":Fw, \"Fw'\":Fwp, \"Fw2\":Fw2, \n \"Bw\":Bw, \"Bw'\":Bwp, \"Bw2\":Bw2, \n\n \"3Uw\":tUw, \"3Uw'\":tUwp, \"3Uw2\":tUw2, \n \"3Dw\":tDw, \"3Dw'\":tDwp, \"3Dw2\":tDw2, \n \"3Rw\":tRw, \"3Rw'\":tRwp, \"3Rw2\":tRw2, \n \"3Lw\":tLw, \"3Lw'\":tLwp, \"3Lw2\":tLw2, \n \"3Fw\":tFw, \"3Fw'\":tFwp, \"3Fw2\":tFw2, \n \"3Bw\":tBw, \"3Bw'\":tBwp, \"3Bw2\":tBw2, \n }\n\n [moveMap[move]() for move in scramble.split()]\n\n # ------\n\n img = Image.new('RGBA', (1325, 1000), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n\n colors = {\n \"R\": (200, 0, 0),\n \"O\": (241,147,1),\n \"W\": (255, 255, 255), \n \"Y\": (245, 238, 0),\n \"G\": (81, 227, 0),\n \"B\": (3, 132, 252)\n }\n\n width = int(7/layers)+1\n boxSize = 300/layers\n\n x = 25\n y = 350\n z = 0\n middle = np.concatenate([faceL, faceF, faceR, faceB], axis=None)\n\n for i in range(4):\n for j in range(layers):\n for k in range(layers):\n draw.rectangle((x,y,x+boxSize,y+boxSize), fill=colors[middle[z]], outline=\"black\", width=width)\n x += boxSize\n z += 1\n x -= 300\n y += boxSize\n x += 325\n y -= 300\n\n x = 350\n y = 25\n z = 0\n topBottom = np.concatenate([faceU, faceD], axis=None)\n\n for i in range(2):\n for j in range(layers):\n for k in range(layers):\n draw.rectangle((x,y,x+boxSize,y+boxSize), fill=colors[topBottom[z]], outline=\"black\", width=width)\n x += boxSize\n z += 1\n x -= 300\n y += boxSize\n y += 350\n\n return img" }, { "alpha_fraction": 0.4729878604412079, "alphanum_fraction": 0.5090323090553284, "avg_line_length": 34.94512176513672, "blob_id": "65d69985926da6819982f092622a9c89ce76e5de", "content_id": "8c65b0dff7ae1755f3f76798fef6fd764421ce7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11791, "license_type": "no_license", 
"max_line_length": 230, "num_lines": 328, "path": "/cogs/memo.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nimport kociemba\nimport re\nfrom scrambles import *\nimport sqlite3\n\ndef get_preferences(id): \n conn = sqlite3.connect('custom.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cubers WHERE id = ?\", (id,))\n return c.fetchone()\n conn.close()\n \n\t\nclass Memo(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def custom(self, ctx):\n\n msg = ctx.message.content.split()\n\n class Cuber:\n def __init__(self, id, corners, edges, CB, EB):\n self.id = id\n self.corners = corners\n self.edges = edges\n self.CB = CB\n self.EB = EB\n\n try: \n cuber = Cuber(ctx.message.author.id, msg[2], msg[3], msg[4], msg[5])\n except IndexError:\n await ctx.send(\"Here is the format: **plz custom [letters for corners] [letters for edges] [corner buffer] [edge buffer]**\\n_All elements need to be separated by a space. Do not include brackets like in the example._\")\n return\n\n conn = sqlite3.connect('custom.db')\n\n c = conn.cursor()\n\n # c.execute(\"\"\"CREATE TABLE cubers (\n # id integer,\n # cornerScheme text,\n # edgeScheme text,\n # cornerBufferTarget text,\n # edgeBufferTarget text\n # )\"\"\")\n\n def insert_cuber(cuber):\n with conn:\n c.execute(\"INSERT INTO cubers VALUES (?, ?, ?, ?, ?)\", (cuber.id, cuber.corners, cuber.edges, cuber.CB, cuber.EB))\n conn.commit()\n\n def get_preferences(id):\n c.execute(\"SELECT * FROM cubers WHERE id=:id\", {'id': id})\n return c.fetchone()\n\n def update_preferences(cuber):\n with conn:\n c.execute(\"\"\"UPDATE cubers SET cornerScheme = ?, edgeScheme = ?, cornerBufferTarget = ?, edgeBufferTarget = ? 
WHERE id = ?\"\"\", (cuber.corners, cuber.edges, cuber.CB, cuber.EB, cuber.id))\n conn.commit()\n\n def validLetterScheme(letters):\n return letters.upper().isalpha() and len(letters) == 24 and len(set(letters)) == len(letters)\n \n def validCorners(buffer):\n return len(buffer) == 1 and buffer in cuber.corners\n\n def validEdges(buffer):\n return len(buffer) == 1 and buffer in cuber.edges\n \n errors = [\"Error(s):\"]\n\n if not validLetterScheme(cuber.corners) or not validLetterScheme(cuber.edges):\n errors.append(\"Letter schemes must contain 24 English letters and must not contain any repeats.\")\n\n if not validCorners(cuber.CB) or not validEdges(cuber.EB):\n errors.append(\"Buffers must appear in their respective letter schemes.\")\n\n if len(errors) > 1:\n await ctx.send('\\n- '.join(errors))\n else:\n if get_preferences(ctx.message.author.id): \n update_preferences(cuber)\n else:\n insert_cuber(cuber)\n await ctx.send(\"Your settings have been updated.\")\n \n conn.close()\n \n\n @commands.command()\n async def settings(self, ctx):\n pref = get_preferences(ctx.message.author.id)\n try:\n await ctx.send(\"**Corners**: {}\\n**Edges**: {}\\n**Corner Buffer**: {}\\n**Edge Buffer**: {}\".format(pref[1].upper(), pref[2].upper(), pref[3].upper(), pref[4].upper()))\n except:\n await ctx.send(\"**Corners**: ABCDEFGHIJKLMNOPQRSTUVWX\\n**Edges**: ABCDEFGHIJKLMNOPQRSTUVWX\\n**Corner Buffer**: A\\n**Edge Buffer**: B\")\n\n\n @commands.command()\n async def reset(self, ctx):\n\n def remove_cuber(id):\n conn = sqlite3.connect('custom.db')\n c = conn.cursor()\n with conn:\n c.execute(\"DELETE from cubers WHERE id = :id\",\n {'id': id})\n conn.commit()\n conn.close()\n\n if get_preferences(ctx.message.author.id):\n remove_cuber(ctx.message.author.id)\n await ctx.send(\"Your settings have been reset.\")\n else:\n await ctx.send(\"User not in database.\")\n\n\n @commands.command()\n async def memo(self, ctx):\n\n orientations = {\n 12: \"y\", 13: \"\", 14: \"y'\", 15: 
\"y2\",\n 21: \"z' x\", 23: \"z'\", 25: \"z' x2\", 26: \"z' x\",\n 31: \"z2 x\", 32: \"z y\", 34: \"z' y'\", 36: \"x'\",\n 41: \"z x\", 43: \"z\", 45: \"z x2\", 46: \"z x'\",\n 51: \"x\", 52: \"z' y\", 54: \"z y'\", 56: \"z2 x'\",\n 62: \"z2 y\", 63: \"z2\", 64: \"z2 y'\", 65: \"x2\"\n }\n\n translator = {\n 12: \"UFRBLD\", 13: \"ULFRBD\", 14: \"UBLFRD\", 15: \"URBLFD\",\n 21: \"FULDRB\", 23: \"RUFDBL\", 25: \"LUBDFR\", 26: \"BURDLF\",\n 31: \"FRULDB\", 32: \"LFUBDR\", 34: \"RBUFDL\", 36: \"BLURDF\",\n 41: \"FDRULB\", 43: \"LDFUBR\", 45: \"RDBUFL\", 46: \"BDLURF\",\n 51: \"FLDRUB\", 52: \"RFDBUL\", 54: \"LBDFUR\", 56: \"BRDLUF\",\n 62: \"DFLBRU\", 63: \"DRFLBU\", 64: \"DBRFLU\", 65: \"DLBRFU\"\n }\n\n try:\n moves = ((' '.join(ctx.message.content.split()[2:])).replace(\"Rw\", 'r').replace(\n \"Lw\", 'l').replace(\"Uw\", 'u').replace(\"Dw\", 'd').replace(\"Fw\", 'f').replace(\"Bw\", 'b')).split()\n except IndexError:\n return\n\n finalO = moves[-1]\n finalOBool = False\n if finalO[0] == '-' and finalO.count('-') == 1:\n try:\n finalO = int(finalO.replace('-', '').replace('W', '1').replace('O', '2').replace(\n 'G', '3').replace('R', '4').replace('B', '5').replace('Y', '6'))\n except ValueError:\n return\n\n finalOBool = True\n del moves[-1]\n\n try:\n returnedBLD3 = ''.join(bld3(moves))\n except KeyError:\n return\n\n topFront = int(returnedBLD3[4] + returnedBLD3[22])\n if not finalOBool:\n finalO = topFront\n\n try:\n tMoves = list(translator[finalO])\n except KeyError:\n return\n\n for rotation in orientations[topFront].split():\n moves.append(rotation)\n\n kSolve = kociemba.solve(''.join(cubestring(moves))).replace(' ', '')\n\n acceptedMoves = ['U', 'D', 'R', 'L', 'F', 'B']\n inversedString = []\n k = len(kSolve) - 1\n\n while k > -1:\n if kSolve[k] == \"'\":\n inversedString.append(kSolve[k-1])\n k -= 2\n elif kSolve[k] == \"2\":\n inversedString.append(kSolve[k-1] + kSolve[k])\n k -= 2\n elif kSolve[k] in acceptedMoves:\n inversedString.append(kSolve[k] + 
\"'\")\n k -= 1\n else:\n break\n \n inversedString = ' '.join(inversedString).translate(str.maketrans('ULFRBD', ''.join(tMoves)))\n newBLD3 = ''.join(bld3(inversedString.split()))\n allLetters = ''.join([i for i in newBLD3 if i.isalpha()])\n \n # Corners ----\n cCorrect = 'ABDCEFHGIJLKMNPOSTRQUVXW'\n cMemo = []\n cMsg = re.findall('([A-Z])', allLetters)\n cPriority = list('POMLNHTUCFBDISKVGJWXQ')\n cReplace = {\n 0:(4,18), 4:(18,0), 18:(0,4), \n 1:(19,13), 19:(13,1), 13:(1,19),\n 3:(12,9), 12:(9,3), 9:(3,12), \n 2:(8,5), 8:(5,2), 5:(2,8), \n 20:(7,10), 7:(10,20), 10:(20,7), \n 21:(11,14), 11:(14,21), 14:(21,11), \n 23:(15,17), 15:(17,23), 17:(23,15), \n 22:(16,6), 16:(6,22), 6:(22,16)\n }\n\n preferences = get_preferences(ctx.message.author.id)\n\n def toCorrectCornerString(corners):\n correctedString = [] \n splitEveryFour = re.findall('....', corners)\n for substring in splitEveryFour:\n substring = substring[:2] + substring[-1] + substring[2]\n correctedString.append(substring)\n correctedString[4] = correctedString[4][::-1]\n return ''.join(correctedString)\n\n cBuffer1 = 0\n cBuffer2 = 4\n cBuffer3 = 18\n if preferences:\n correctCornerString = toCorrectCornerString(preferences[1])\n cBuffer1 = correctCornerString.index(preferences[3])\n cBuffer2 = cReplace[cBuffer1][0]\n cBuffer3 = cReplace[cBuffer1][1]\n\n def swapCorners(current):\n swap0 = cCorrect.index(current)\n swap1 = cReplace[swap0][0]\n swap2 = cReplace[swap0][1]\n\n cMsg[cBuffer1], cMsg[swap0] = cMsg[swap0], cMsg[cBuffer1]\n cMsg[cBuffer2], cMsg[swap1] = cMsg[swap1], cMsg[cBuffer2]\n cMsg[cBuffer3], cMsg[swap2] = cMsg[swap2], cMsg[cBuffer3]\n\n while ''.join(cMsg) != cCorrect:\n current = cMsg[cBuffer1]\n\n if current == cCorrect[cBuffer1] or current == cCorrect[cBuffer2] or current == cCorrect[cBuffer3]:\n cCopy = cMsg.copy()\n cCopy = [i for i in cCopy if cCopy.index(\n i) != list(cCorrect).index(i)]\n cCopy = [i for i in cPriority if i in cCopy]\n current = cCopy[0]\n\n cMemo.append(current)\n 
swapCorners(current)\n\n # Edges ----\n eCorrect = 'adbcehfgiljkmpnosrtquxvw'\n eMemo = []\n eMsg = re.findall('([a-z])', allLetters)\n ePriority = list('dlrxfhpntjuwvegaciqkso')\n eReplace = {\n 0:19, 19:0, 2:12, 12:2, \n 3:8, 8:3, 1:4, 4:1, \n 6:9, 9:6, 10:13, 13:10, \n 14:18, 18:14, 17:5, 5:17, \n 11:20, 20:11, 15:22, 22:15, \n 16:23, 23:16, 7:21, 21:7\n }\n\n def toCorrectEdgeString(edges):\n\n correctedString = [] \n splitEveryFour = re.findall('....', edges)\n for substring in splitEveryFour:\n substring = substring[0] + substring[-1] + substring[1:-1]\n correctedString.append(substring)\n correctedString[4] = correctedString[4][::-1]\n return ''.join(correctedString)\n\n eBuffer1 = 2\n eBuffer2 = 12\n if preferences:\n correctEdgeString = toCorrectEdgeString(preferences[2])\n eBuffer1 = correctEdgeString.index(preferences[4])\n eBuffer2 = eReplace[eBuffer1]\n\n def swapEdges(current):\n swap0 = eCorrect.index(current)\n swap1 = eReplace[swap0]\n\n eMsg[eBuffer1], eMsg[swap0] = eMsg[swap0], eMsg[eBuffer1]\n eMsg[eBuffer2], eMsg[swap1] = eMsg[swap1], eMsg[eBuffer2]\n\n while ''.join(eMsg) != eCorrect:\n current = eMsg[eBuffer1]\n\n if current == eCorrect[eBuffer1] or current == eCorrect[eBuffer2]:\n eCopy = eMsg.copy()\n eCopy = [i for i in eCopy if eCopy.index(\n i) != list(eCorrect).index(i)]\n eCopy = [i for i in ePriority if i in eCopy]\n current = eCopy[0]\n\n eMemo.append(current)\n swapEdges(current)\n \n # Final output ----\n cMemo = ' '.join(cMemo)\n eMemo = ' '.join(eMemo).upper()\n\n if preferences:\n cMemo = cMemo.translate(str.maketrans('ABCDEFGHIJKLMNOPQRSTUVWX', str(preferences[1])))\n eMemo = eMemo.translate(str.maketrans('ABCDEFGHIJKLMNOPQRSTUVWX', str(preferences[2])))\n\n parityBool = 'no' if len(cMemo.split()) % 2 == 0 else 'yes'\n await ctx.send('```yaml\\n' +\n 'Corners: ' + cMemo +\n '\\nEdges: ' + eMemo +\n '\\nParity: '+ parityBool +\n '\\n```')\n\n\ndef setup(client):\n client.add_cog(Memo(client))\n\n" }, { "alpha_fraction": 
0.537571132183075, "alphanum_fraction": 0.5883853435516357, "avg_line_length": 40.778690338134766, "blob_id": "0ab97c544b9b284e0dca430f0bc7bb028ea10baf", "content_id": "3bbb4501e34d20b00aa78af799ab547f79246e00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5097, "license_type": "no_license", "max_line_length": 184, "num_lines": 122, "path": "/squareoneimage.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import ast\nimport math\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\ndef squanimage(scramble):\n # defining the colors of the layers and faces\n uFace = np.array([[\"y2\",\"y2\",\"y1\",\"y2\",\"y2\",\"y1\"], [\"y2\",\"y2\",\"y1\",\"y2\",\"y2\",\"y1\"]])\n dFace = np.array([[\"w1\",\"w2\",\"w2\",\"w1\",\"w2\",\"w2\"], [\"w1\",\"w2\",\"w2\",\"w1\",\"w2\",\"w2\"]])\n uLayer = np.array([[\"r\",\"b\",\"b\",\"b\",\"o\",\"o\"], [\"o\",\"g\",\"g\",\"g\",\"r\",\"r\"]])\n dLayer = np.array([[\"o\",\"o\",\"b\",\"b\",\"b\",\"r\"], [\"r\",\"r\",\"g\",\"g\",\"g\",\"o\"]])\n # colors of the pieces\n colors = {\n \"w1\": (255, 255, 255), \n \"w2\": (255, 255, 255), \n \"y1\": (245, 238, 0),\n \"y2\": (245, 238, 0),\n \"g\": (81, 227, 0),\n \"b\": (3, 132, 252),\n \"r\": (200, 0, 0),\n \"o\": (241,147,1),\n }\n # swaps the U and D faces and layers just like a normal slice\n def slice():\n temp = np.copy(dFace[1])\n dFace[1] = uFace[1]\n uFace[1] = temp\n\n temp = np.copy(dLayer[1])\n dLayer[1] = uLayer[1]\n uLayer[1] = temp\n\n def draw_edge(degrees, topColor, sideColor):\n width = int(500*(2**.5)) # edge height is 250 so its hypotenuse would be 250sqrt2, times 2 is 500sqrt2\n # did this so that corners would stay on canvas when rotated\n img = Image.new('RGBA', (width, width), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n # setting up the canvas and math stuff\n center = width/2 # self explanatory\n edgeHeight = 250 # distance from center to the outside \n offset = 
math.tan(15*math.pi / 180) * 250 # distance of half of the base of an edge \n\n draw.polygon((center, center, center-offset, center-edgeHeight, center+offset, center-edgeHeight), fill=colors[sideColor], outline=\"black\")\n edgeHeight *= .65\n offset *= .65\n draw.polygon((center, center, center-offset, center-edgeHeight, center+offset, center-edgeHeight), fill=colors[topColor], outline=\"black\")\n\n return img.rotate(-degrees)\n\n def draw_corner(degrees, topColor, sideColors):\n width = int(500*(2**.5))\n img = Image.new('RGBA', (width, width), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n\n center = width/2\n edgeHeight = 250\n offset = math.tan(15*math.pi / 180) * 250\n\n draw.polygon((center, center, center+offset, center-edgeHeight, center+edgeHeight, center-edgeHeight), fill=colors[sideColors[0]], outline=\"black\")\n draw.polygon((center, center, center+edgeHeight, center-edgeHeight, center+edgeHeight, center-offset), fill=colors[sideColors[1]], outline=\"black\")\n # draws a polygon for each of the side colors of a corner pieces of which there are 2\n edgeHeight *= .65\n offset *= .65\n draw.polygon((center, center, center+offset, center-edgeHeight, center+edgeHeight, center-edgeHeight, center+edgeHeight, center-offset), fill=colors[topColor], outline=\"black\")\n # draws a smaller version of the previous two polygons, combined\n return img.rotate(-degrees)\n\n numOfSlices = scramble.count(\"/\")\n scramble = [x.strip(' ') for x in scramble.split(\"/\")]\n\n if not scramble[-1]:\n del scramble[-1]\n else:\n scramble.append(\"(0,0)\")\n\n for count, move in enumerate(scramble):\n move = ast.literal_eval(move)\n uFace = np.roll(uFace, move[0])\n dFace = np.roll(dFace, move[1])\n uLayer = np.roll(uLayer, move[0])\n dLayer = np.roll(dLayer, move[1])\n slice()\n\n img = Image.new('RGBA', (1405, 850), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n\n uFace = uFace.flatten()\n uLayer = uLayer.flatten()\n dFace = dFace.flatten()\n dLayer = 
dLayer.flatten()\n\n i = 0 # iterator, used while loop instead of for loop because I needed a way to jump forward two elements\n x = 0 # x position of the pasted images\n y = 0 # y position of the pasted images\n degrees = 180 # starting orientation of upper face drawings\n currentFace = uFace \n currentLayer = uLayer\n for j in range(2):\n while i < 12:\n if currentFace[i] == \"y2\" or currentFace[i] == \"w2\": # draws an edge\n img.paste(draw_corner(degrees, currentFace[i], currentLayer[i:i+2]), (x,y), mask=draw_corner(degrees, currentFace[i], currentLayer[i:i+2]))\n degrees += 60\n i += 2\n else: # draws a corner\n degrees += 30\n img.paste(draw_edge(degrees, currentFace[i], currentLayer[i]), (x,y), mask=draw_edge(degrees, currentFace[i], currentLayer[i]))\n i += 1\n i = 0 \n x = 700\n degrees = 150\n currentFace = dFace\n currentLayer = dLayer\n\n # draws the middle layer \n # also I didn't know how to not use \"magic numbers\" for this because it was kind of just eyeballed to match cstimer\n draw.rectangle((450,700,600,800), fill=\"red\", outline=\"black\")\n if numOfSlices % 2 == 0:\n draw.rectangle((600,700,950,800), fill=\"red\", outline=\"black\")\n else:\n draw.rectangle((600,700,750,800), fill=\"orange\", outline=\"black\")\n\n return img\n" }, { "alpha_fraction": 0.6009795665740967, "alphanum_fraction": 0.62143474817276, "avg_line_length": 38.431819915771484, "blob_id": "6451a329aad2751edafdcec3d265b6af6ad7e652", "content_id": "8e453d351f61de3a824cf1ee988113e6be2caf8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3483, "license_type": "no_license", "max_line_length": 196, "num_lines": 88, "path": "/cogs/scrambleImages.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import asyncio\nimport discord\nfrom discord.ext import commands\nfrom pyTwistyScrambler import scrambler333, scrambler222, scrambler444, scrambler555, 
pyraminxScrambler,scrambler666,scrambler777,megaminxScrambler,squareOneScrambler,skewbScrambler,clockScrambler\nfrom clockimage import *\nfrom megaimage import *\nfrom pyraminximage import *\nfrom scrambleupdate import *\nfrom skewbimage import *\nfrom squareoneimage import *\n\n\nclass ScrambleImages(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=[\"pyra\", \"pyraminx\", \"mega\", \"megaminx\", \"skoob\", \"skewb\", \"sq1\", \"squareone\", \"squan\", \"cloncc\", \"clock\"])\n async def nonNXN(self, ctx):\n\n puzzle = ctx.message.content.split()[1]\n\n scrambleGen = {\n \"pyra\": (pyraminxScrambler.get_WCA_scramble, pyraimage), \n \"pyraminx\": (pyraminxScrambler.get_WCA_scramble, pyraimage), \n \"mega\": (megaminxScrambler.get_WCA_scramble, megaimage), \n \"megaminx\": (megaminxScrambler.get_WCA_scramble, megaimage), \n \"skoob\": (skewbScrambler.get_WCA_scramble, skewbimage), \n \"skewb\": (skewbScrambler.get_WCA_scramble, skewbimage),\n \"sq1\": (squareOneScrambler.get_WCA_scramble, squanimage), \n \"squareone\": (squareOneScrambler.get_WCA_scramble, squanimage), \n \"squan\": (squareOneScrambler.get_WCA_scramble, squanimage), \n \"cloncc\": (clockScrambler.get_WCA_scramble, clockimage), \n \"clock\": (clockScrambler.get_WCA_scramble, clockimage)\n }\n\n scramble = scrambleGen[puzzle][0]()\n\n msg = await ctx.send(scramble)\n await msg.add_reaction('๐Ÿ‘€') \n\n def check(reaction, user):\n return user == ctx.message.author and str(reaction.emoji) == '๐Ÿ‘€' and reaction.message.id == msg.id\n\n try:\n reaction, user = await self.client.wait_for('reaction_add', timeout=15, check=check)\n except asyncio.TimeoutError:\n await msg.clear_reactions()\n else:\n file = scrambleGen[puzzle][1](scramble)\n file.save(\"scramble.png\")\n await ctx.send(file = discord.File(\"scramble.png\"))\n await msg.clear_reactions()\n\n @commands.command(aliases=['2', '2x2', '3', '3x3', '4', '4x4', '5', '5x5', '6', '6x6', '7', '7x7'])\n 
async def _xyz(self, ctx):\n layers = ctx.message.content.split()[1][0]\n\n scrambleGen = {\n \"2\": scrambler222.get_WCA_scramble, \n \"3\": scrambler333.get_WCA_scramble, \n \"4\": scrambler444.get_WCA_scramble, \n \"5\": scrambler555.get_WCA_scramble, \n \"6\": scrambler666.get_WCA_scramble, \n \"7\": scrambler777.get_WCA_scramble, \n }\n\n scramble = scrambleGen[layers]()\n\n msg = await ctx.send(scramble)\n await msg.add_reaction('๐Ÿ‘€') \n\n def check(reaction, user):\n return user == ctx.message.author and str(reaction.emoji) == '๐Ÿ‘€' and reaction.message.id == msg.id\n\n try:\n reaction, user = await self.client.wait_for('reaction_add', timeout=15, check=check)\n except asyncio.TimeoutError:\n await msg.clear_reactions()\n else:\n file = scrambleimage(int(layers), scramble)\n file.save(\"scramble.png\")\n await ctx.send(file = discord.File(\"scramble.png\"))\n await msg.clear_reactions()\n\n\ndef setup(client):\n client.add_cog(ScrambleImages(client))\n\n" }, { "alpha_fraction": 0.5286778211593628, "alphanum_fraction": 0.5741154551506042, "avg_line_length": 46.114036560058594, "blob_id": "d19dd03f7361f8b0c7f1753ae1297fcd0d45eb19", "content_id": "31655bc7310fb1768b89a24251433665563d75a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5370, "license_type": "no_license", "max_line_length": 137, "num_lines": 114, "path": "/clockimage.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "from PIL import Image, ImageDraw # only import\n\ndef clockimage(scramble): # scramble goes here and is split here\n scramble = scramble.split()\n \n front = [0,0,0,0,0,0,0,0,0] \n back = [0,0,0,0,0,0,0,0,0] # lists for the front and back faces of the clock\n current, other = front, back # variables to help determine which face is the \"current\" one with moves being done to it\n\n corners = [0,2,6,8]\n mirrored = [2,0,8,6] \n # lists for the corners to update them after each move, as they 
are connected in the real puzzle\n # the reason why there are two is because the corners are mirrored\n # maybe mirror everything after instead of this solution??\n UL = [0,1,3,4]\n U = [0,1,2,3,4,5]\n UR = [1,2,4,5]\n R = [1,2,4,5,7,8]\n DR = [4,5,7,8]\n D = [3,4,5,6,7,8]\n DL = [3,4,6,7]\n L = [0,1,3,4,6,7]\n ALL = [0,1,2,3,4,5,6,7,8] # lists for which mini-clocks are affected by the turns\n\n moveMap = {\n \"UL\":UL, \"U\":U, \"UR\":UR, \"R\":R, \"DR\":DR, \"D\":D, \"DL\":DL, \"L\":L, \"ALL\":ALL\n }\n # dictionary for which list to use for the turn\n pins = {\n \"UL\":False, \"UR\":False, \"DL\":False, \"DR\":False\n }\n # used to determine the state of the pins (up/down)\n # maybe there is better way of doing this??\n for move in scramble:\n if len(move) == 2: # if the move is either y2 or one of the the pins going up/down\n if move == \"y2\":\n if current == front:\n current, other = back, front # switches the current face from back to front and vice versa\n else:\n current, other = front, back\n else:\n pins[move] = True \n # this assumes that a pin was the move and it is now being pressed up\n # need to account for multiple pin moves as it will not stay True or False\n # currently not an issue due to the way the scrambles are for clocks\n continue\n \n dial = move[:-2] # which dials one would rotate for the move\n turn = int(move[-2]) # how many hours to turn\n direction = move[-1] # which direction to turn those hours\n if direction == \"-\": turn = -turn # did this so I didn't have to do -= decrement later\n \n for a in moveMap[dial]: # for each mini-clock in corresponding move/dials, turn by correct amount of hours\n current[a] += turn\n\n for i in range(len(current)): # keeps the numbers between [1, 12]\n while current[i] < 0:\n current[i] += 12\n while current[i] > 12:\n current[i] -= 12\n \n for i in range(4): # updates the corners as they are connected\n other[corners[i]] = 12 - current[mirrored[i]]\n\n def hour_hand(degrees: int, fill: str, outline: 
str): \n # function to draw the hour hand for the image\n # did this so I didn't have to do math to determine exact coordinates as this just rotates the image and pastes it to final image\n hour = Image.new('RGBA', (150, 150), (255, 255, 255, 0))\n draw = ImageDraw.Draw(hour)\n\n draw.ellipse((0,0,150,150), fill=fill, outline=outline, width=65)\n draw.polygon((65,75,75,0,85,75), fill=fill)\n\n hour = hour.rotate(-degrees) # default goes counter clockwise so I needed to change it to clockwise\n return hour\n\n img = Image.new('RGBA', (1125, 550), (255, 255, 255, 0)) # setting up canvas\n draw = ImageDraw.Draw(img)\n\n x, y, z = 25, 25, 0 # mini-clock 1 on front side starts at 25, 25; z is an iterator\n handColor, bgColor, face = \"black\", \"white\", current # color of the hour hand, color of background, face being iterated\n for i in range(2): # does twice because there are two faces\n for j in range(3): # three rows\n for k in range(3): # three columns\n img.paste(hour_hand(face[z]*30, handColor, bgColor), (x,y,x+150,y+150)) # each mini-clock is 150x150\n x += 175 # gap between mini-clocks of 25\n z += 1 # increments to next element in current face\n x -= 525 # resets x to 25\n y += 175 # drops down a row\n x, y, z = 600, 25, 0 # coordinates of second clock face\n handColor, bgColor, face = \"white\", \"black\", other # swaps colors and face\n\n # drawing the pins\n x, y, z = 165, 165, 0 # starts at 165, 165\n pinBool = True # if pin is True/up, it will be colored white with outline of black and vice versa\n pinVals = list(pins.values()) # list of the states of the pins\n for i in range(2):\n for j in range(2):\n for k in range(2):\n if pinVals[z] is pinBool:\n inner, outer = \"white\", \"black\"\n else:\n inner, outer = \"black\", \"white\"\n draw.ellipse((x,y,x+45,y+45), fill=inner, outline=outer, width=2)\n x += 175 # same gap as between mini-clocks\n z += 1\n x -= 350 # resets x to 165\n y += 175 # drops down a row\n x, y, z = 740, 165, 0 # coordinates of 
other face's pins\n pinBool = False # pins must be inverted in colors\n pinVals[0], pinVals[1] = pinVals[1], pinVals[0]\n pinVals[2], pinVals[3] = pinVals[3], pinVals[2] # pins must also be flipped along the y-axis\n \n return img # show the image" }, { "alpha_fraction": 0.6836460828781128, "alphanum_fraction": 0.6863270998001099, "avg_line_length": 23.933332443237305, "blob_id": "09603e3cf7096b0c0438490fdb8b6afe6f7bd477", "content_id": "f6cfee20ba32640167b3b8fd36c452b813b31373", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 62, "num_lines": 15, "path": "/main.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "from discord.ext import commands\nfrom dotenv import load_dotenv\nimport keep_alive\nimport os\n\nload_dotenv()\nbot = commands.Bot(command_prefix = ['plz ', 'Plz ', 'PLZ ']) \nbot.remove_command('help')\n\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n bot.load_extension(f'cogs.{filename[:-3]}')\n\nkeep_alive.keep_alive()\nbot.run(os.getenv(\"TOKEN\"))" }, { "alpha_fraction": 0.6572327017784119, "alphanum_fraction": 0.6572327017784119, "avg_line_length": 21.678571701049805, "blob_id": "f3fb809ff0b5f556432352cfe818239100ff9281", "content_id": "dff9031d6a13e9384060d36264035be5f447b37b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/cogs/events.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import CommandNotFound\n\n\nclass Events(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('Bot is online.')\n\n @commands.Cog.listener()\n async def on_message(self, 
message):\n\n if message.author.id == self.client.user.id:\n return\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx, error):\n if isinstance(error, CommandNotFound): return\n raise error\n\n\ndef setup(client):\n client.add_cog(Events(client))\n\n" }, { "alpha_fraction": 0.746835470199585, "alphanum_fraction": 0.7848101258277893, "avg_line_length": 77, "blob_id": "db896f0bcd47b0d68bb421067bfe327fad337ce0", "content_id": "ab68dbc85d21fd891e35517d3f67cfa30652b8e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 77, "num_lines": 1, "path": "/README.md", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "Code for Scrambly Boi, a cubing bot with over almost 900 servers and growing!\n\n" }, { "alpha_fraction": 0.5926054120063782, "alphanum_fraction": 0.6147201061248779, "avg_line_length": 27.097087860107422, "blob_id": "73f8b3beaf1b4ab75f669058ad6058a06412c75f", "content_id": "051c204b7b737504491d25da629de298cf172c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2902, "license_type": "no_license", "max_line_length": 203, "num_lines": 103, "path": "/archive.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "# for on_message event 3bld/4bld/5bld\n\nscrambleCommands = {\n 'plz 3bld': scrambler333.get_3BLD_scramble,\n 'plz 4bld': scrambler444.get_4BLD_scramble,\n 'plz 5bld': scrambler555.get_5BLD_scramble,\n}\n\n# ---\nchannel = message.channel\nmessageSplit = message.content.lower().split()\n\nif message.author.id == bot.user.id:\n\treturn\n\ntry:\n\tscrambleCommands[(messageSplit[0] + ' ' + messageSplit[1])]\nexcept:\n\treturn\n\nif len(messageSplit) > 2:\n\tamt = int(messageSplit[-1])\n\tif amt > 5: amt = 5\nelse:\n\tamt = 1\n\nfor i in range(amt):\n\tembed = discord.Embed(title = '', description = 
scrambleCommands[(messageSplit[0] + ' ' + messageSplit[1])](), color = 0x43a8ff)\n\tawait channel.send(embed = embed)\n\n\n# old show command only used for 3x3\[email protected](pass_context=True)\nasync def show(self, ctx):\n\tformattedstr = []\n\tremovedslash = []\n\tmsg = ctx.message.content[9:]\n\n\tfor line in msg.splitlines():\n\t\tslashes = [i for i in range(len(line)) if line.startswith('//', i)]\n\t\tif len(slashes) > 0:\n\t\t\tr = line.replace(line[slashes[0]:],'')\n\t\t\tremovedslash.append(r)\n\t\telse:\n\t\t\tremovedslash.append(line)\n\n\tmsg = ' '.join(removedslash)\n\tmsg = msg.replace(\"โ€™\", \"'\").replace(\"(\", '').replace(\")\", '').replace(' ', '')\n\tmsg = msg.replace('Rw','r').replace(\"Lw\",'l').replace(\"Uw\",'u').replace(\"Dw\",'d').replace(\"Fw\",'f').replace(\"Bw\",'b')\n\tif len(msg) > 0:\n\t\tm = len(msg) - 1\n\t\twhile m > -1: \n\t\t\tif msg[m] == \"'\":\n\t\t\t\tif msg[m-1] == \"2\":\n\t\t\t\t\tformattedstr.append(msg[m-2] + msg[m-1] + msg[m])\n\t\t\t\t\tm -= 3\n\t\t\t\telif msg[m-1] in acceptedmoves:\n\t\t\t\t\tformattedstr.append(msg[m-1] + msg[m])\n\t\t\t\t\tm -= 2\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send('invalid notation dud1')\n\t\t\t\t\treturn\n\t\t\telif msg[m] == \"2\":\n\t\t\t\tif msg[m-1] in acceptedmoves:\n\t\t\t\t\tformattedstr.append(msg[m-1] + msg[m])\n\t\t\t\t\tm -= 2\n\t\t\t\telif msg[m-1] == 'w':\n\t\t\t\t\tawait ctx.send('use lowercase notation for wide moves plz')\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send('invalid notation dud2')\n\t\t\t\t\treturn\n\t\t\telif msg[m] in acceptedmoves:\n\t\t\t\tformattedstr.append(msg[m])\n\t\t\t\tm -= 1\n\t\t\telse:\n\t\t\t\tawait ctx.send('invalid notation dud')\n\t\t\t\treturn\n\telse: \n\t\tawait ctx.send(\"nothing to show bruh (e.g, \\\"plz show R U R' U'\\\")\")\n\t\treturn\n\n\tmsg = formattedstr[::-1]\n\tmsg.append(' ')\n\tbase = await ctx.send(' '.join(msg) + '\\n' + input3([]))\n\tawait base.add_reaction('๐Ÿ‘€')\n\t\n\tdef check(reaction, 
user):\n\t\treturn user == ctx.author and str(reaction.emoji) == '๐Ÿ‘€'\n\n\tmovecounter = 1\n\tfor move in msg:\n\t\ttry:\n\t\t\treaction, user = await self.client.wait_for('reaction_add', timeout = 1, check=check)\n\t\texcept:\n\t\t\tawait base.edit(content= str(' '.join(msg[:movecounter-1])) + ' ' + \"**{}**\".format(''.join(str(msg[movecounter-1]))) + ' ' + str(' '.join(msg[movecounter:])) + ' ' + '\\n' + input3(msg[:movecounter]))\n\t\t\tmovecounter += 1\n\t\telse:\n\t\t\tawait base.edit(content = ' '.join(msg) + '\\n' + input3(msg))\n\t\t\tawait base.clear_reactions()\n\t\t\treturn\n\n\tawait base.clear_reactions()\t" }, { "alpha_fraction": 0.2515592575073242, "alphanum_fraction": 0.3251378536224365, "avg_line_length": 18.075862884521484, "blob_id": "a33a5cf59ffacc9b66d252df777485e72b2a945e", "content_id": "d357d26bc35287a88e353c07c0cc7eff10561a71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11063, "license_type": "no_license", "max_line_length": 92, "num_lines": 580, "path": "/scrambles.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import random\n\n# Lists for scrambling - not random state but it is fast\nURF = ['U_', 'R_', 'F_']\nUD = ['U_', 'D_']\nRL = ['R_', 'L_']\nFB = ['F_', 'B_']\nUDRLFB = [['U_', 'D_'], ['R_', 'L_'], ['F_', 'B_']]\nspecification = ['', '\\'', '2']\nacceptedmoves = ['U','D','R','L','F','B','x','y','z','M','E','S','u','d','r','l','f','b']\nXYZ = ['x_', 'y_', 'z_']\n\n# Emotes that make up the scramble images - much nicer than the default color squares emojis\n# white = '<:wt:710663779280486440>'\nwhite = ':white_large_square:'\nyello = '<:ys:709901846399156324>'\ngreen = '<:gr:710283142396641352>'\nbluee = '<:bl:710286003386056716>'\norang = '<:or:710664059741012009>'\nreeed = '<:rd:710664221045686273>'\n\n#____________________________________________\ndef R():\n w[3], g[3], y[3], b[3] = g[3], y[3], b[3], w[3]\n w[6], g[6], y[6], b[6] = 
g[6], y[6], b[6], w[6]\n w[9], g[9], y[9], b[9] = g[9], y[9], b[9], w[9]\n\n r[1], r[3], r[9], r[7] = r[7], r[1], r[3], r[9]\n r[2], r[6], r[8], r[4] = r[4], r[2], r[6], r[8]\n\n\ndef Rp():\n R(), R(), R()\n\n\ndef R2():\n R(), R()\n\n\ndef L():\n w[1], g[1], y[1], b[1] = b[1], w[1], g[1], y[1]\n w[4], g[4], y[4], b[4] = b[4], w[4], g[4], y[4]\n w[7], g[7], y[7], b[7] = b[7], w[7], g[7], y[7]\n\n o[1], o[3], o[9], o[7] = o[7], o[1], o[3], o[9]\n o[2], o[6], o[8], o[4] = o[4], o[2], o[6], o[8]\n\n\ndef Lp():\n L(), L(), L()\n\n\ndef L2():\n L(), L()\n\n\ndef U():\n g[1], r[1], b[9], o[1] = r[1], b[9], o[1], g[1]\n g[2], r[2], b[8], o[2] = r[2], b[8], o[2], g[2]\n g[3], r[3], b[7], o[3] = r[3], b[7], o[3], g[3]\n\n w[1], w[3], w[9], w[7] = w[7], w[1], w[3], w[9]\n w[2], w[6], w[8], w[4] = w[4], w[2], w[6], w[8]\n\n\ndef Up():\n U(), U(), U()\n\n\ndef U2():\n U(), U()\n\n\ndef D():\n g[7], r[7], b[3], o[7] = o[7], g[7], r[7], b[3]\n g[8], r[8], b[2], o[8] = o[8], g[8], r[8], b[2]\n g[9], r[9], b[1], o[9] = o[9], g[9], r[9], b[1]\n\n y[1], y[3], y[9], y[7] = y[7], y[1], y[3], y[9]\n y[2], y[6], y[8], y[4] = y[4], y[2], y[6], y[8]\n\n\ndef Dp():\n D(), D(), D()\n\n\ndef D2():\n D(), D()\n\n\ndef F():\n w[7], r[1], y[3], o[9] = o[9], w[7], r[1], y[3]\n w[8], r[4], y[2], o[6] = o[6], w[8], r[4], y[2]\n w[9], r[7], y[1], o[3] = o[3], w[9], r[7], y[1]\n\n g[1], g[3], g[9], g[7] = g[7], g[1], g[3], g[9]\n g[2], g[6], g[8], g[4] = g[4], g[2], g[6], g[8]\n\n\ndef Fp():\n F(), F(), F()\n\n\ndef F2():\n F(), F()\n\n\ndef B():\n w[1], r[3], y[9], o[7] = r[3], y[9], o[7], w[1]\n w[2], r[6], y[8], o[4] = r[6], y[8], o[4], w[2]\n w[3], r[9], y[7], o[1] = r[9], y[7], o[1], w[3]\n\n b[1], b[3], b[9], b[7] = b[7], b[1], b[3], b[9]\n b[2], b[6], b[8], b[4] = b[4], b[2], b[6], b[8]\n\n\ndef Bp():\n B(), B(), B()\n\n\ndef B2():\n B(), B()\n\n\ndef M():\n g[2], y[2], b[2], w[2] = w[2], g[2], y[2], b[2]\n g[5], y[5], b[5], w[5] = w[5], g[5], y[5], b[5]\n g[8], y[8], b[8], w[8] = w[8], g[8], 
y[8], b[8]\n\n\ndef Mp():\n M(), M(), M()\n\n\ndef M2():\n M(), M()\n\n\ndef E():\n g[4], r[4], b[6], o[4] = o[4], g[4], r[4], b[6]\n g[5], r[5], b[5], o[5] = o[5], g[5], r[5], b[5]\n g[6], r[6], b[4], o[6] = o[6], g[6], r[6], b[4]\n\n\ndef Ep():\n E(), E(), E()\n\n\ndef E2():\n E(), E()\n\n\ndef S():\n w[4], r[2], y[6], o[8] = o[8], w[4], r[2], y[6]\n w[5], r[5], y[5], o[5] = o[5], w[5], r[5], y[5]\n w[6], r[8], y[4], o[2] = o[2], w[6], r[8], y[4]\n\n\ndef Sp():\n S(), S(), S()\n\n\ndef S2():\n S(), S()\n\n\ndef X():\n Mp(), R(), Lp()\n\n\ndef Xp():\n M(), Rp(), L()\n\n\ndef X2():\n X(), X()\n\n\ndef Y():\n Ep(), U(), Dp()\n\n\ndef Yp():\n E(), Up(), D()\n\n\ndef Y2():\n Y(), Y()\n\n\ndef Z():\n S(), F(), Bp()\n\n\ndef Zp():\n Sp(), Fp(), B()\n\n\ndef Z2():\n Z(), Z()\n\n\ndef uw():\n U(), Ep()\n\n\ndef up():\n Up(), E()\n\n\ndef u2():\n uw(), uw()\n\n\ndef dw():\n D(), E()\n\n\ndef dp():\n Dp(), Ep()\n\n\ndef d2():\n dw(), dw()\n\n\ndef rw():\n R(), Mp()\n\n\ndef rp():\n Rp(), M()\n\n\ndef r2():\n rw(), rw()\n\n\ndef lw():\n L(), M()\n\n\ndef lp():\n Lp(), Mp()\n\n\ndef l2():\n lw(), lw()\n\n\ndef fw():\n F(), S()\n\n\ndef fp():\n Fp(), Sp()\n\n\ndef f2():\n fw(), fw()\n\n\ndef bw():\n B(), Sp()\n\n\ndef bp():\n Bp(), S()\n\n\ndef b2():\n bw(), bw()\n\n\ndef no():\n pass\n\n\nsequence = {\n 'U': U, \"U'\": Up, 'U2': U2, 'U2\\'': U2,\n 'D': D, \"D'\": Dp, 'D2': D2, 'D2\\'': D2,\n 'R': R, \"R'\": Rp, 'R2': R2, 'R2\\'': R2,\n 'L': L, \"L'\": Lp, 'L2': L2, 'L2\\'': L2,\n 'F': F, \"F'\": Fp, 'F2': F2, 'F2\\'': F2,\n 'B': B, \"B'\": Bp, 'B2': B2, 'B2\\'': B2,\n 'M': M, \"M'\": Mp, 'M2': M2, 'M2\\'': M2,\n 'E': E, \"E'\": Ep, 'E2': E2, 'E2\\'': E2,\n 'S': S, \"S'\": Sp, 'S2': S2, 'S2\\'': S2,\n 'x': X, \"x'\": Xp, 'x2': X2, 'x2\\'': X2,\n 'y': Y, \"y'\": Yp, 'y2': Y2, 'y2\\'': Y2,\n 'z': Z, \"z'\": Zp, 'z2': Z2, 'z2\\'': Z2,\n 'u': uw, \"u'\": up, 'u2': u2, 'u2\\'': u2,\n 'd': dw, \"d'\": dp, 'd2': d2, 'd2\\'': d2,\n 'r': rw, \"r'\": rp, 'r2': r2, 'r2\\'': r2,\n 'l': 
lw, \"l'\": lp, 'l2': l2, 'l2\\'': l2,\n 'f': fw, \"f'\": fp, 'f2': f2, 'f2\\'': f2,\n 'b': bw, \"b'\": bp, 'b2': b2, 'b2\\'': b2,\n ' ': no\n}\n\n\ndef cubestring(msg):\n global w\n global o\n global g \n global r \n global b\n global y\n w = ['', 'U', 'U', 'U',\n 'U', 'U', 'U',\n 'U', 'U', 'U']\n y = ['', 'D', 'D', 'D',\n 'D', 'D', 'D',\n 'D', 'D', 'D']\n g = ['', 'F', 'F', 'F',\n 'F', 'F', 'F',\n 'F', 'F', 'F']\n b = ['', 'B', 'B', 'B',\n 'B', 'B', 'B',\n 'B', 'B', 'B']\n o = ['', 'L', 'L', 'L',\n 'L', 'L', 'L',\n 'L', 'L', 'L']\n r = ['', 'R', 'R', 'R',\n 'R', 'R', 'R',\n 'R', 'R', 'R']\n\n [sequence[z]() for z in msg]\n return (w+r+g+y+o+b[::-1])\n\ndef bld3(msg):\n global w\n global o\n global g \n global r \n global b\n global y\n w = ['', 'A', 'a', 'B',\n 'd', '1', 'b',\n 'D', 'c', 'C']\n o = ['', 'E', 'e', 'F',\n 'h', '2', 'f',\n 'H', 'g', 'G']\n g = ['', 'I', 'i', 'J',\n 'l', '3', 'j',\n 'L', 'k', 'K']\n r = ['', 'M', 'm', 'N',\n 'p', '4', 'n',\n 'P', 'o', 'O']\n b = ['', 'S', 's', 'T',\n 'r', '5', 't',\n 'R', 'q', 'Q']\n y = ['', 'U', 'u', 'V',\n 'x', '6', 'v',\n 'X', 'w', 'W']\n\n [sequence[z]() for z in msg]\n return (w+o+g+r+b+y)\n\n \ndef input3(msg):\n global w\n global o\n global g \n global r \n global b\n global y\n w = ['',white, white, white,\n white, white, white,\n white, white, white]\n y = ['',yello, yello, yello,\n yello, yello, yello,\n yello, yello, yello]\n g = ['',green, green, green,\n green, green, green,\n green, green, green]\n b = ['',bluee, bluee, bluee, \n bluee, bluee, bluee,\n bluee, bluee, bluee]\n o = ['',orang, orang, orang,\n orang, orang, orang,\n orang, orang, orang]\n r = ['',reeed, reeed, reeed,\n reeed, reeed, reeed,\n reeed, reeed, reeed]\n\n [sequence[z]() for z in msg]\n thescramble3 = (\n '_ _ '+ w[1]+ w[2]+ w[3]+'\\n'\n ' '+ w[4]+ w[5]+ w[6]+'\\n'\n ' '+ w[7]+ w[8]+ w[9]+'\\n'\n + o[1]+ o[2]+ o[3]+' '+g[1]+ g[2]+ g[3]+' '+r[1]+ r[2]+ r[3]+' '+b[9]+ b[8]+ b[7]+'\\n'\n + o[4]+ o[5]+ o[6]+' '+g[4]+ g[5]+ g[6]+' 
'+r[4]+ r[5]+ r[6]+' '+b[6]+ b[5]+ b[4]+'\\n'\n + o[7]+ o[8]+ o[9]+' '+g[7]+ g[8]+ g[9]+' '+r[7]+ r[8]+ r[9]+' '+b[3]+ b[2]+ b[1]+'\\n' \n ' ' +y[1]+ y[2]+ y[3]+'\\n'\n ' ' +y[4]+ y[5]+ y[6]+'\\n'\n ' ' +y[7]+ y[8]+ y[9]+'\\n'\n )\n return thescramble3\n\ndef scramble2(msg):\n w = ['',white, white, white, white]\n y = ['',yello, yello, yello, yello]\n g = ['',green, green, green, green]\n b = ['',bluee, bluee, bluee, bluee]\n o = ['',orang, orang, orang, orang]\n r = ['',reeed, reeed, reeed, reeed]\n def R():\n w[2], g[2], y[2], b[2] = g[2], y[2], b[2], w[2] \n w[4], g[4], y[4], b[4] = g[4], y[4], b[4], w[4]\n\n r[1], r[2], r[4], r[3] = r[3], r[1], r[2], r[4]\n\n def Rp():\n R(), R(), R()\n\n def R2():\n R(), R()\n\n def L():\n w[1], g[1], y[1], b[1] = b[1], w[1], g[1], y[1]\n w[3], g[3], y[3], b[3] = b[3], w[3], g[3], y[3]\n\n o[1], o[2], o[4], o[3] = o[3], o[1], o[2], o[4]\n \n def Lp():\n L(), L(), L()\n\n def U():\n g[1], r[1], b[4], o[1] = r[1], b[4], o[1], g[1]\n g[2], r[2], b[3], o[2] = r[2], b[3], o[2], g[2]\n\n w[1], w[2], w[4], w[3] = w[3], w[1], w[2], w[4]\n\n def Up():\n U(), U(), U()\n\n def U2():\n U(), U()\n\n def D():\n g[3], r[3], b[2], o[3] = o[3], g[3], r[3], b[2]\n g[4], r[4], b[1], o[4] = o[4], g[4], r[4], b[1]\n \n y[1], y[2], y[4], y[3] = y[3], y[1], y[2], y[4]\n\n def Dp():\n D(), D(), D()\n\n def F():\n w[3], r[1], y[2], o[4] = o[4], w[3], r[1], y[2]\n w[4], r[3], y[1], o[2] = o[2], w[4], r[3], y[1]\n\n g[1], g[2], g[4], g[3] = g[3], g[1], g[2], g[4]\n\n def Fp():\n F(), F(), F()\n\n def F2():\n F(), F()\n\n def B():\n w[1], r[2], y[4], o[3] = r[2], y[4], o[3], w[1]\n w[2], r[4], y[3], o[1] = r[4], y[3], o[1], w[2]\n\n b[1], b[2], b[4], b[3] = b[3], b[1], b[2], b[4]\n\n def Bp():\n B(), B(), B()\n\n def X():\n R(), Lp()\n\n def Xp():\n Rp(), L()\n\n def X2():\n X(), X()\n\n def Y():\n U(), Dp()\n\n def Yp():\n Up(), D()\n\n def Y2():\n Y(), Y()\n\n def Z():\n F(), Bp()\n \n def Zp():\n Fp(), B()\n \n def Z2():\n Z(), Z()\n\n sequence = {\n 
'U':U, \"U'\":Up, 'U2':U2,\n 'D':D, \"D'\":Dp,\n 'R':R, \"R'\":Rp, 'R2':R2,\n 'L':L, \"L'\":Lp,\n 'F':F, \"F'\":Fp, 'F2':F2,\n 'B':B, \"B'\":Bp,\n 'x':X, \"x'\":Xp, \"x2\":X2,\n 'y':Y, \"y'\":Yp, \"y2\":Y2,\n 'z':Z, \"z'\":Zp, \"z2\":Z2\n } \n\n [sequence[z]() for z in msg]\n return (\n '_ _ ' +w[1]+ w[2]+\n '\\n'+ '_ _ ' +w[3]+ w[4]+\n '\\n'+ o[1]+ o[2]+' '+g[1]+ g[2]+' '+r[1]+ r[2]+' '+b[4]+ b[3]+ \n '\\n'+ o[3]+ o[4]+' '+g[3]+ g[4]+' '+r[3]+ r[4]+' '+b[2]+ b[1]+\n '\\n'+ '_ _ ' +y[1]+ y[2]+\n '\\n'+ '_ _ ' +y[3]+ y[4] )\n \n\ndef scramble1():\n w = [white] \n o = [orang] \n g = [green] \n b = [bluee] \n r = [reeed] \n ye = [yello]\n\n def y(): \n g[0], r[0], b[0], o[0] = r[0], b[0], o[0], g[0]\n\n def yp():\n y(), y(), y()\n\n def y2():\n y(), y()\n\n def x():\n w[0], g[0], ye[0], b[0] = g[0], ye[0], b[0], w[0]\n\n def xp():\n x(), x(), x()\n\n def x2():\n x(), x()\n\n def z():\n w[0], r[0], ye[0], o[0] = o[0], w[0], r[0], ye[0]\n\n def zp():\n z(), z(), z()\n\n def z2():\n z(), z()\n\n sequence = {\n 'y':y, \"y'\":yp, 'y2':y2, \n 'x':x, \"x'\":xp, 'x2':x2,\n 'z':z, \"z'\":zp, 'z2':z2}\n\n fs = ['']\n while len(fs) < 10:\n fs.append(random.choice(XYZ))\n if fs[-1] == fs[-2]:\n del fs[-1]\n\n for t in XYZ:\n if t not in fs:\n fs[random.randint(1,10)] = t\n\n del fs[0]\n fs = [item.replace('_', str(random.choice(specification))) for item in fs]\n\n [sequence[m]() for m in fs]\n thescramble1 = (\n '_ _ ' + w[0] + '\\n' +\n o[0] + g[0] + r[0] + b[0] + '\\n' +\n '_ _ ' + ye[0]\n )\n\n return ' '.join(fs) + '\\n' + thescramble1" }, { "alpha_fraction": 0.5485678911209106, "alphanum_fraction": 0.5628891587257385, "avg_line_length": 29.884614944458008, "blob_id": "bbe3fee68781020b53f07d7495dff279114e9775", "content_id": "f14c8c596992237068306b3c71e2c2c4c265dbe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 196, "num_lines": 52, "path": 
"/cogs/showScramble.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom clockimage import *\nfrom megaimage import *\nfrom pyraminximage import *\nfrom scrambleupdate import *\nfrom skewbimage import *\nfrom squareoneimage import *\n\n\n\nclass ShowScramble(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def show(self, ctx, puzzle):\n\n puzzles = [\"pyra\", \"pyraminx\", \"mega\", \"megaminx\", \"skoob\", \"skewb\", \"sq1\", \"squareone\", \"squan\", \"cloncc\", \"clock\", '2', '2x2', '3', '3x3', '4', '4x4', '5', '5x5', '6', '6x6', '7', '7x7']\n\n scrambleGen = {\n \"pyra\": pyraimage, \"pyraminx\": pyraimage, \n \"mega\": megaimage, \"megaminx\": megaimage, \n \"skoob\": skewbimage, \"skewb\": skewbimage,\n \"sq1\": squanimage, \"squareone\": squanimage, \"squan\": squanimage, \n \"cloncc\": clockimage, \"clock\": clockimage,\n }\n\n if puzzle in puzzles:\n if puzzle[0].isdigit():\n layers = puzzle[0]\n else:\n layers = False\n else:\n return\n\n scramble = \" \".join(ctx.message.content.split()[3:])\n\n try:\n if layers:\n file = scrambleimage(int(layers), scramble)\n elif layers == False:\n file = scrambleGen[puzzle](scramble)\n file.save(\"showscramble.png\")\n await ctx.send(scramble)\n await ctx.send(file = discord.File(\"showscramble.png\"))\n except:\n await ctx.send(\"something went wrong\")\n\n\ndef setup(client):\n client.add_cog(ShowScramble(client))\n" }, { "alpha_fraction": 0.4397263526916504, "alphanum_fraction": 0.5048360228538513, "avg_line_length": 28.636363983154297, "blob_id": "e54c885c22070d07231a57c5edd76e519b77dd1f", "content_id": "a7abbdafd79cd2e697b7278bcdcb37533d1823f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4239, "license_type": "no_license", "max_line_length": 139, "num_lines": 143, "path": "/skewbimage.py", "repo_name": 
"RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image, ImageDraw\n\ndef skewbimage(scramble):\n scramble = scramble.split()\n\n faceU = np.repeat(\"W\", 5)\n faceD = np.repeat(\"Y\", 5)\n faceR = np.repeat(\"R\", 5)\n faceL = np.repeat(\"O\", 5)\n faceF = np.repeat(\"G\", 5)\n faceB = np.repeat(\"B\", 5)\n\n rR = (1,2,3,4)\n rB = (4,2,0,3)\n lL = (3,2,1,0)\n bB = (0,2,4,1)\n\n colors = {\n \"R\": (200, 0, 0),\n \"O\": (241,147,1),\n \"W\": (255, 255, 255), \n \"Y\": (245, 238, 0),\n \"G\": (81, 227, 0),\n \"B\": (3, 132, 252)\n }\n\n hyp = int(125*(3**.5))\n\n\n def R():\n faceU[1], faceF[4], faceL[3] = faceF[4], faceL[3], faceU[1]\n for i in range(4):\n faceR[rR[i]], faceB[rB[i]], faceD[rR[i]] = faceD[rR[i]], faceR[rR[i]], faceB[rB[i]]\n\n\n def L():\n faceU[3], faceR[3], faceB[4] = faceB[4], faceU[3], faceR[3]\n for i in range(4):\n faceF[rB[i]], faceD[lL[i]], faceL[rR[i]] = faceL[rR[i]], faceF[rB[i]], faceD[lL[i]]\n\n\n def U():\n faceF[0], faceR[1], faceD[3] = faceR[1], faceD[3], faceF[0]\n for i in range(4):\n faceU[lL[i]], faceL[lL[i]], faceB[bB[i]] = faceB[bB[i]], faceU[lL[i]], faceL[lL[i]]\n\n\n def B():\n faceU[0], faceF[3], faceR[4] = faceR[4], faceU[0], faceF[3]\n for i in range(4):\n faceL[rB[i]], faceD[rB[i]], faceB[rR[i]] = faceB[rR[i]], faceL[rB[i]], faceD[rB[i]]\n\n\n def draw_face(face, degrees: int = 0):\n hyp = int(125*(3**.5))\n\n\n def draw_diamond(fill):\n hyp = int(125*(3**.5))\n width = 500\n height = 250\n diamond = Image.new('RGBA', (width, height), (255,255, 255, 0))\n draw = ImageDraw.Draw(diamond)\n draw.polygon((width/2, 0, width/2 - hyp/2, height/4, width/2, height/2, width/2 + hyp/2, height/4), fill=fill, outline=\"black\")\n return diamond\n\n def draw_rectangle(fill):\n hyp = int(125*(3**.5))\n width = 500\n height = 250\n rectangle = Image.new('RGBA', (width, height), (255,255, 255, 0))\n draw = ImageDraw.Draw(rectangle)\n draw.rectangle((width/2 - hyp/2, height/4, width/2 + 
hyp/2, height*.75), fill=fill, outline=\"black\")\n return rectangle\n\n width = 500\n height = 515\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0)) \n\n x = 0\n y = 0\n img.paste(draw_diamond(colors[face[0]]), (x, y), mask=draw_diamond(colors[face[0]]))\n\n x += int(hyp/2)\n y += 63\n img.paste(draw_diamond(colors[face[1]]), (x, y), mask=draw_diamond(colors[face[0]]))\n\n x -= hyp\n img.paste(draw_diamond(colors[face[3]]), (x, y), mask=draw_diamond(colors[face[0]]))\n\n x += int(hyp/2)\n y += 63\n img.paste(draw_diamond(colors[face[4]]), (x, y), mask=draw_diamond(colors[face[0]]))\n \n x = 0\n y = 0\n img.paste(draw_rectangle(colors[face[2]]), (x,y), mask=draw_rectangle(colors[face[2]]))\n\n img = img.rotate(degrees)\n\n return img\n\n moveMap = {\n \"R\": R, \"L\":L, \"U\":U, \"B\":B\n }\n\n turns = [\"U\",\"R\",\"L\",\"B\"]\n for move in scramble:\n if move[-1] == \"'\":\n move = move[:-1]\n spec = 2\n else:\n spec = 1\n\n for i in range(spec):\n moveMap[move]()\n\n LFDorder = (3,0,2,4,1)\n faceL = [faceL[i] for i in LFDorder]\n faceF = [faceF[i] for i in LFDorder]\n faceD = [faceD[i] for i in LFDorder]\n faceR = faceR[::-1]\n faceB = faceB[::-1]\n\n\n width = 950\n height = 775\n middle = int((width-500)/2)\n\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0)) \n draw = ImageDraw.Draw(img)\n\n img.paste(draw_face(faceU, 0), (middle, 0), mask=draw_face(faceU, 0))\n\n img.paste(draw_face(faceL, 120), (middle - hyp - 15, -130), mask=draw_face(faceR, 120))\n img.paste(draw_face(faceF, 120), (middle, 0), mask=draw_face(faceF, 120))\n img.paste(draw_face(faceD, 120), (middle, 265), mask=draw_face(faceR, 120))\n\n img.paste(draw_face(faceR, 240), (middle, 0), mask=draw_face(faceR, 240))\n img.paste(draw_face(faceB, 240), (middle + hyp + 15, -130), mask=draw_face(faceR, 240))\n\n return img\n\n" }, { "alpha_fraction": 0.37187230587005615, "alphanum_fraction": 0.4302559792995453, "avg_line_length": 23.83571434020996, "blob_id": 
"074f7bcad383d29242e963122358d320e35bb6cc", "content_id": "4617b60e4b2095a450cd34327ffd27d57015cd4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3477, "license_type": "no_license", "max_line_length": 127, "num_lines": 140, "path": "/pyraminximage.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image, ImageDraw\n\ndef pyraimage(scramble):\n scramble = scramble.split()\n\n gr = np.repeat(\"G\", 9)\n re = np.repeat(\"R\", 9)\n bl = np.repeat(\"B\", 9)\n ye = np.repeat(\"Y\", 9)\n\n rAxis = (3,6,7,8)\n lAxis = (6,1,5,4)\n\n rAlt = (8,3,7,6)\n lAlt = (4,6,5,1)\n\n def U():\n for i in range(4):\n gr[i], re[i], bl[i] = bl[i], gr[i], re[i]\n\n def Up():\n U(), U()\n\n def u():\n gr[0], re[0], bl[0] = bl[0], gr[0], re[0]\n\n def up():\n u(), u()\n\n def R():\n for i in range(4):\n gr[rAxis[i]], bl[lAxis[i]], ye[lAxis[i]] = ye[lAxis[i]], gr[rAxis[i]], bl[lAxis[i]]\n\n def Rp():\n R(), R()\n\n def r():\n gr[8], bl[4], ye[4] = ye[4], gr[8], bl[4]\n\n def rp():\n r(), r()\n\n def L():\n for i in range(4):\n gr[lAxis[i]], ye[rAxis[i]], re[rAxis[i]] = re[rAxis[i]], gr[lAxis[i]], ye[rAxis[i]]\n\n def Lp():\n L(), L()\n\n def l():\n gr[4], ye[8], re[8] = re[8], gr[4], ye[4]\n\n def lp():\n l(), l()\n\n def B():\n for i in range(4):\n ye[i], bl[rAlt[i]], re[lAlt[i]] = re[lAlt[i]], ye[i], bl[rAlt[i]]\n\n def Bp():\n B(), B()\n\n def b():\n ye[0], bl[8], re[4] = re[4], ye[0], bl[8]\n\n def bp():\n b(), b()\n\n moveMap = {\n \"U\":U, \"U'\":Up, \"R\":R, \"R'\":Rp, \"L\":L, \"L'\":Lp, \"B\":B, \"B'\":Bp,\n \"u\":u, \"u'\":up, \"r\":r, \"r'\":rp, \"l\":l, \"l'\":lp, \"b\":b, \"b'\":bp\n }\n\n [moveMap[move]() for move in scramble]\n\n\n def draw_face(face: list, degrees: int):\n width = 400\n height = 400\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0)) \n draw = ImageDraw.Draw(img)\n\n colors = {\n \"R\": (200, 0, 0),\n \"G\": (81, 227, 0),\n 
\"B\": (3, 132, 252),\n \"Y\": (245, 238, 0)\n }\n\n def triangle(point: int, fill: str):\n degrees = 0 if point % 2 == 0 else 180\n \n base = 100\n height = int((base*(3**.5))/2)\n sticker = Image.new('RGBA', (base, height), (255, 255, 255, 0))\n draw = ImageDraw.Draw(sticker)\n \n draw.polygon((0,height,base*.5,0,base,height), fill=fill)\n sticker = sticker.rotate(degrees)\n\n return sticker\n\n tBase = 100\n tHeight = int((tBase*(3**.5))/2)\n x = int(width/2) - 50\n y = int(((height-(tHeight*3))/2)-tHeight/2)\n z = 1\n alpha = 0 \n beta = 0\n for i in range(3):\n for j in range(z):\n img.paste(triangle(alpha, colors[face[beta]]), (x, y), mask=triangle(alpha, \"red\"))\n x += int(tBase/2)\n alpha += 1\n beta += 1\n x -= 50*(z+1)\n y += tHeight\n z += 2\n alpha = 0\n\n outline = Image.open(\"WireframePyra.png\")\n img.paste(outline, (0,0), outline)\n img = img.rotate(-degrees)\n return img\n \n img = Image.new('RGBA', (800, 650), (255, 255, 255, 0)) \n draw = ImageDraw.Draw(img)\n\n faces = (re, gr, bl, ye)\n degrees = (60, 0, 300, 180)\n\n height = int((100*(3**.5))/2)\n coordinates = ((0,-75),(200,height-75),(400,-75),(200,height*3-25))\n\n for i in range(4):\n img.paste(draw_face(faces[i], degrees[i]), (coordinates[i][0],coordinates[i][1]), mask=draw_face(faces[i], degrees[i]))\n\n img = img.crop((40, 30, 760, 615))\n return img\n" }, { "alpha_fraction": 0.5717625021934509, "alphanum_fraction": 0.5745675563812256, "avg_line_length": 32.93650817871094, "blob_id": "db9ca155431ad0add762bdfc58114af91de1d393", "content_id": "e92f5ecf3a8b964fef04cdb815b1af0f08f2eafb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 113, "num_lines": 63, "path": "/cogs/queries.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport discord\nfrom discord.ext import commands\nimport requests\n\n\nclass 
Queries(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def tcs(self, ctx):\n links = []\n query = \"+\".join(ctx.message.content.split()[2:])\n if not query:\n await ctx.send(\"Make sure to enter a search query.\")\n return\n\n page = requests.get(\"https://www.thecubicle.com/search?type=product&q=\" + query)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n try:\n await ctx.send(\"https://www.thecubicle.com/\" + soup.find('a', class_='product-grid-item')['href'])\n except TypeError:\n await ctx.send(\"No results found...\")\n\n @commands.command()\n async def scss(self, ctx):\n links = []\n query = \"+\".join(ctx.message.content.split()[2:])\n if not query:\n await ctx.send(\"Make sure to enter a search query.\")\n return\n\n page = requests.get(\"https://www.speedcubeshop.com/search?type=product&q=\" + query)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n try:\n await ctx.send(\"https://www.speedcubeshop.com/\" + soup.find('a', class_='product-title')['href'])\n except TypeError:\n await ctx.send(\"No results found...\")\n\n @commands.command()\n async def wcaid(self, ctx):\n name = \"+\".join(ctx.message.content.split()[2:])\n if not name:\n await ctx.send(\"Be sure to enter a name to search up\")\n else:\n url = (\"https://www.worldcubeassociation.org/search?q=\" + str(name)).replace(\" \",\"%20\")\n page = requests.get(url)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n\n try:\n query = soup.find(\"table\",{\"class\":\"table table-nonfluid table-vertical-align-middle\"}).a[\"href\"]\n await ctx.send(\"https://www.worldcubeassociation.org\" + query)\n except:\n await ctx.send(\"No results found...\")\n\n\ndef setup(client):\n client.add_cog(Queries(client))\n\n" }, { "alpha_fraction": 0.5685279369354248, "alphanum_fraction": 0.6345177888870239, "avg_line_length": 19.736841201782227, "blob_id": "2815c21d6a99d1d0c917c880dcd0646dc08905a9", "content_id": 
"ac9284304765f7e053464a293392df9846ce693e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 394, "license_type": "no_license", "max_line_length": 36, "num_lines": 19, "path": "/pyproject.toml", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "[build-system]\nbuild-backend = \"poetry.masonry.api\"\nrequires = [\"poetry>=0.12\"]\n\n[tool]\n[tool.poetry]\nauthors = [\"Your Name <[email protected]>\"]\ndescription = \"\"\nname = \"scrambly-boi-py\"\nversion = \"0.1.0\"\n[tool.poetry.dependencies]\nbs4 = \"^0.0.1\"\nflask = \"^1.1\"\npython = \"^3.8\"\npytwistyscrambler = \"^1.2\"\nkociemba = \"^1.2.1\"\n\"discord.py\" = \"^1.3.4\"\npython-dotenv = \"^0.15.0\"\n[tool.poetry.dev-dependencies]\n" }, { "alpha_fraction": 0.4803476929664612, "alphanum_fraction": 0.5142353177070618, "avg_line_length": 37.911766052246094, "blob_id": "a61d54edb3fa8dc3d472058e6727c5879c73953e", "content_id": "be7dddf44a75f228bbcee54d19001e1107307a3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8031, "license_type": "no_license", "max_line_length": 164, "num_lines": 204, "path": "/cogs/virtualCubes.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import CommandNotFound\nfrom scrambles import *\nimport random\n\nemotes = [\n '๐Ÿ‡บ','<:redU:718597576122826803>',\n '๐Ÿ‡ฉ','<:redD:718598898410717204>',\n '๐Ÿ‡ท','<:redR:718598898561712248>',\n '๐Ÿ‡ฑ','<:redL:718598898507055124>',\n '๐Ÿ‡ซ','<:redF:718598897970446357>',\n '๐Ÿ‡ง','<:redB:718598898301665403>',\n 'โฌ†๏ธ','โฌ‡๏ธ','โฌ…๏ธ','โžก๏ธ','โ†ฉ๏ธ','โ†ช๏ธ','๐Ÿ”„'\n]\n\nreactiondict = {\n '๐Ÿ‡บ':'U', '๐Ÿ‡ฉ':'D', '๐Ÿ‡ท':'R', '๐Ÿ‡ฑ':'L', '๐Ÿ‡ซ':'F', '๐Ÿ‡ง':'B',\n '<:redU:718597576122826803>':\"U'\", '<:redD:718598898410717204>':\"D'\",\n '<:redR:718598898561712248>':\"R'\", 
'<:redL:718598898507055124>':\"L'\",\n '<:redF:718598897970446357>':\"F'\", '<:redB:718598898301665403>':\"B'\",\n 'โฌ†๏ธ':'x','โฌ‡๏ธ':\"x'\",'โฌ…๏ธ':'y','โžก๏ธ':\"y'\",'โ†ฉ๏ธ':'z','โ†ช๏ธ':\"z'\"\n}\n\n\nclass VirtualCubes(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def bro(self, ctx):\n await ctx.send(\"bro\")\n \n @commands.command()\n async def solve2(self, ctx):\n print('command ran')\n w = white\n y = yello\n g = green\n b = bluee\n r = reeed\n o = orang\n\n themoves = ['']\n while len(themoves) < 10:\n themoves.append(random.choice(URF))\n if themoves[-1] == themoves[-2]:\n del themoves[-1]\n\n for t in URF:\n if t not in themoves:\n themoves[random.randint(1,9)] = t\n\n themoves = [item.replace('_', str(random.choice(specification))) for item in themoves]\n del themoves[0]\n\n message = await ctx.send(scramble2(themoves))\n\n for e in emotes[:18]:\n await message.add_reaction(e)\n\n def check(reaction, user):\n return (reaction.message.id == message.id) and (user.id == ctx.author.id) and (str(reaction) in emotes)\n\n orientations = [\"x2 z z z\", \"x z z z\", \"x' z z z\", \"y z z z\", \"y' z z z\", \"z z z\"]\n\n while True:\n try:\n reaction, user = await self.client.wait_for('reaction_add', check=check, timeout=300)\n except:\n await message.clear_reactions()\n return\n\n if str(reaction):\n try:\n themoves.append(reactiondict[str(reaction)])\n await message.remove_reaction(str(reaction), user)\n except KeyError:\n return\n\n checklist = themoves.copy()\n checkmessage = scramble2(checklist).replace(\"_\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\") \n if checkmessage == w+w+w+w+o+o+g+g+r+r+b+b+o+o+g+g+r+r+b+b+y+y+y+y:\n await message.edit(content=\"POGGERS YOU SOLVED THE CUBE!!\" + \"\\n\" + scramble2(themoves))\n await message.clear_reactions()\n return\n\n for moves in orientations:\n for move in moves.split():\n checklist.append(move)\n checkmessage = 
scramble2(checklist).replace(\"_\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\") \n if checkmessage == w+w+w+w+o+o+g+g+r+r+b+b+o+o+g+g+r+r+b+b+y+y+y+y:\n await message.edit(content=\"POGGERS YOU SOLVED THE CUBE!!\" + \"\\n\" + scramble2(themoves))\n await message.clear_reactions()\n return\n checklist = themoves.copy()\n\n await message.edit(content=str(' '.join(themoves[9:]) + '\\n' + scramble2(themoves)))\n\n @commands.command()\n async def sandbox(self, ctx):\n # moves inputted by the user through reactions\n themoves = []\n message = await ctx.send(input3(themoves))\n\n # adds reactions\n [await message.add_reaction(e) for e in emotes]\n\n def check(reaction, user):\n return (reaction.message.id == message.id) and (user.id == ctx.author.id) and (str(reaction) in emotes)\n\n # constantly checks if a reaction has been added by the user who invoked the command\n while True:\n try:\n reaction, user = await self.client.wait_for('reaction_add', check=check, timeout=300) # 5 min inactivity\n except:\n await message.clear_reactions()\n return\n \n if str(reaction):\n try:\n themoves.append(reactiondict[str(reaction)]) \n await message.remove_reaction(str(reaction), user)\n except KeyError:\n # had to account for the reset reaction this way because it has to clear the list of moves\n # this is why this emote is not in the dictionary \n if str(reaction) == '๐Ÿ”„':\n themoves = []\n await message.remove_reaction('๐Ÿ”„', user)\n else:\n return\n\n await message.edit(content=' '.join(themoves) + '\\n' + input3(themoves))\n\n @commands.command()\n async def solve3(self, ctx):\n print('command ran')\n\n w = white\n y = yello\n g = green\n b = bluee\n r = reeed\n o = orang\n # very epic 3x3 scrambler (random moves) that is pretty compact and gets the job done\n themoves = ['']\n while len(themoves) < 20:\n gen = random.choice(UD + RL + FB)\n if gen != themoves[-1]: themoves.append(gen)\n for l in UDRLFB:\n if themoves[-1] in l and themoves[-2] in l and len(themoves) < 20:\n 
themoves.append(random.choice([item for item in (UD + RL + FB) if item not in l]))\n themoves = [item.replace('_', str(random.choice(specification))) for item in themoves]\n del themoves[0]\n\n message = await ctx.send(input3(themoves))\n\n # adds all reactions except for the reset emote\n for e in emotes[:18]:\n await message.add_reaction(e)\n\n def check(reaction, user):\n return (reaction.message.id == message.id) and (user.id == ctx.author.id) and (str(reaction) in emotes)\n\n orientations = [\"x2 z z z\", \"x z z z\", \"x' z z z\", \"y z z z\", \"y' z z z\", \"z z z\"]\n\n while True:\n try:\n reaction, user = await self.client.wait_for('reaction_add', check=check, timeout=300)\n except:\n await message.clear_reactions()\n return\n\n if str(reaction):\n try:\n themoves.append(reactiondict[str(reaction)])\n await message.remove_reaction(str(reaction), user)\n except KeyError:\n return\n\n checklist = themoves.copy()\n checkmessage = input3(checklist).replace(\"_\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\")\n \n if checkmessage == w+w+w+ w+w+w+ w+w+w+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ y+y+y+ y+y+y+ y+y+y:\n await message.edit(content=\"POGGERS YOU SOLVED THE CUBE!!\" + \"\\n\" + input3(themoves))\n await message.clear_reactions()\n return \n\n for moves in orientations:\n for move in moves.split():\n checklist.append(move)\n checkmessage = input3(checklist).replace(\"_\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\") \n if checkmessage == w+w+w+ w+w+w+ w+w+w+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ o+o+o+ g+g+g+ r+r+r+ b+b+b+ y+y+y+ y+y+y+ y+y+y:\n await message.edit(content=\"POGGERS YOU SOLVED THE CUBE!!\" + \"\\n\" + input3(themoves))\n await message.clear_reactions()\n return\n checklist = themoves.copy()\n\n await message.edit(content=str(' '.join(themoves[19:]) + '\\n' + input3(themoves)))\n\n\ndef setup(client):\n client.add_cog(VirtualCubes(client))\n" }, { "alpha_fraction": 
0.4644384980201721, "alphanum_fraction": 0.4844919741153717, "avg_line_length": 33.30275344848633, "blob_id": "911fbc4d2ba21043bcf8925de7aa42676b898143", "content_id": "c4225ac1a1484bf321bf40f10d935c4345d172f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3748, "license_type": "no_license", "max_line_length": 111, "num_lines": 109, "path": "/cogs/misc.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom pyTwistyScrambler import scrambler333\nfrom scrambleupdate import *\nfrom scrambles import *\nacceptedmoves = ['U', 'D', 'R', 'L', 'F', 'B', 'Uw', 'Dw', 'Rw', 'Lw', 'Fw', 'Bw']\n\n\nclass Misc(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def test(self, ctx):\n await ctx.send(\"Python: <:peponice:693014841614663700>\")\n\n @commands.command()\n async def avg(self, ctx):\n times = ctx.message.content.split(' ')[2:]\n times = [int(i) for i in times]\n average = (sum(times) - min(times) - max(times)) / (len(times) - 2)\n await ctx.send(\"{:.2f}\".format(float(average)))\n \n @commands.command(aliases=['1','1x1'])\n async def _1(self, ctx, amount: int = 1):\n if amount > 5: amount = 5\n for i in range(amount):\n await ctx.send(scramble1()) \n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.guild)\n async def noob3(self, ctx):\n noobdict = {\n 'U':'top', 'D':'bottom', 'R':'right', 'L':'left', 'F':'front', 'B':'back'\n }\n\n memestring = []\n msg = scrambler333.get_WCA_scramble()\n newmsg = msg.replace(' ','')\n m = len(newmsg) - 1\n while m > -1: \n if newmsg[m] == \"'\":\n memestring.append('turn the ' + noobdict[newmsg[m-1]] + ' face counterclockwise by 90 degrees')\n m -= 2\n elif newmsg[m] == \"2\":\n memestring.append('turn the ' + noobdict[newmsg[m-1]] + ' face by 180 degrees')\n m -= 2\n elif newmsg[m] in acceptedmoves:\n memestring.append('turn 
the ' + noobdict[newmsg[m]] + ' face clockwise by 90 degrees')\n m -= 1\n else:\n return\n\n memestring = memestring[::-1]\n n = await ctx.send(', '.join(memestring))\n \n await n.add_reaction('๐Ÿ™ƒ') \n\n def check(reaction, user):\n return user == ctx.author and str(reaction.emoji) == '๐Ÿ™ƒ'\n\n try:\n reaction, user = await self.client.wait_for('reaction_add', timeout=10, check=check)\n except:\n await n.clear_reactions()\n else:\n file = scrambleimage(3, msg)\n file.save(\"scramble.png\")\n await ctx.send(file = discord.File(\"scramble.png\"))\n await n.clear_reactions()\n \n @commands.command(pass_context=True)\n async def setup(self, ctx):\n formattedstr = []\n msg = ctx.message.content[10:]\n msg = msg.replace(\"โ€™\", \"'\").replace(\"(\", '').replace(\")\", '').replace(' ', '')\n\n if len(msg) > 0:\n m = len(msg) - 1\n while m > -1: \n if msg[m] == \"'\":\n if msg[m-1] == \"2\":\n formattedstr.append(msg[m-2] + msg[m-1])\n m -= 3\n elif msg[m-1] in acceptedmoves:\n formattedstr.append(msg[m-1])\n m -= 2\n else:\n await ctx.send('invalid notation dud')\n return\n elif msg[m] == \"2\":\n formattedstr.append(msg[m-1] + msg[m])\n m -= 2\n elif msg[m] in acceptedmoves:\n formattedstr.append(msg[m] + \"'\")\n m -= 1\n else:\n await ctx.send('invalid notation dud')\n return\n else: \n await ctx.send('nothing to show bruh')\n return\n\n msg = formattedstr\n await ctx.send(' '.join(msg))\n\ndef setup(client):\n client.add_cog(Misc(client))\n\n" }, { "alpha_fraction": 0.3919607698917389, "alphanum_fraction": 0.49392157793045044, "avg_line_length": 29.72891616821289, "blob_id": "7e4cac404b13b7a67d26765aa03e807a4638da50", "content_id": "db2077ea2a1c4e0e23e6ef61c86d31c72ad43c75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5100, "license_type": "no_license", "max_line_length": 151, "num_lines": 166, "path": "/megaimage.py", "repo_name": "RitchieFu/scrambly-boi-py", "src_encoding": "UTF-8", "text": "import 
numpy as np\nfrom PIL import Image, ImageDraw\n\nwh = np.repeat(\"Wh\", 11)\ngr = np.repeat(\"Gr\", 11)\nre = np.repeat(\"Re\", 11)\nbl = np.repeat(\"Bl\", 11) \nye = np.repeat(\"Ye\", 11) \npu = np.repeat(\"Pu\", 11)\n\nra = np.repeat(\"Ra\", 11) \nbe = np.repeat(\"Be\", 11)\npi = np.repeat(\"Pi\", 11) \nlg = np.repeat(\"Lg\", 11) \nro = np.repeat(\"Or\", 11) \nlb = np.repeat(\"Lb\", 11) \n\n\ndef megaimage(scramble):\n scramble = scramble.split()\n\n colors = {\n \"Wh\": (255, 255, 255), \n \"Gr\": (0, 102, 0),\n \"Re\": (221, 0, 0),\n \"Bl\": (0,0,187),\n \"Ye\": (255,204,0),\n \"Pu\": (136,17,255),\n\n \"Ra\": (112,128,144),\n \"Be\": (225, 198, 153),\n \"Pi\": (255, 153, 255),\n \"Lg\": (119, 238, 0),\n \"Or\": (255,136,51),\n \"Lb\": (136, 221, 255)\n }\n\n grRot = (1,2,4,5,7,8,9,10)\n lbRot = (3,4,6,7,5,8,9,10)\n roRot = (7,10,6,9,1,0,5,8)\n yeRot = (9,8,4,7,1,0,3,6)\n whRot = (1,2,6,3,9,10,7,4)\n\n beRot = (0,1,2,5,6,3,8,9,10,7,4)\n lgRot = (4,7,10,3,6,9,2,1,0,5,8)\n blRot = (2,5,10,1,4,9,0,3,6,7,8)\n dRot = (3,4,5,6,7,8,9,10)\n\n pgTurn = (8,5,0,1,2,9,6,3,10,7,4)\n\n def R():\n global pi\n for i in range(8):\n gr[grRot[i]], lb[lbRot[i]], ro[roRot[i]], ye[yeRot[i]], wh[whRot[i]] = lb[lbRot[i]], ro[roRot[i]], ye[yeRot[i]], wh[whRot[i]], gr[grRot[i]]\n\n for i in range(11):\n re[i], be[beRot[i]], ra[beRot[i]], lg[lgRot[i]], bl[blRot[i]] = be[beRot[i]], ra[beRot[i]], lg[lgRot[i]], bl[blRot[i]], re[i]\n\n pi = [pi[i] for i in pgTurn]\n\n def D():\n global ra\n for i in range(8):\n gr[dRot[i]], re[dRot[i]], bl[dRot[i]], ye[dRot[i]], pu[dRot[i]] = pu[dRot[i]], gr[dRot[i]], re[dRot[i]], bl[dRot[i]], ye[dRot[i]]\n \n for i in range(11):\n be[i], pi[i], lg[i], ro[i], lb[i] = lb[i], be[i], pi[i], lg[i], ro[i]\n\n ra = [ra[i] for i in pgTurn]\n\n def U():\n global wh\n for i in range(3):\n gr[i], re[i], bl[i], ye[i], pu[i] = re[i], bl[i], ye[i], pu[i], gr[i] \n\n wh = [wh[i] for i in pgTurn]\n\n moveMap = {\n \"R\":R, \"D\":D, \"U\":U\n }\n\n for move in scramble:\n 
if move[1:] == \"++\":\n spec = 2\n elif move[1:] == \"--\":\n spec = 3\n elif move[1:] == \"'\":\n spec = 4\n else:\n spec = 1\n\n move = move[0]\n\n for i in range(spec):\n moveMap[move]()\n\n pointyFaces = [list(array) for array in [wh, be, pi, lg, ro, lb, ra]]\n\n def draw_face(face, rDegrees: int = 0):\n \n def draw_edge(fill, degrees):\n width = 250\n height = 250\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n draw.polygon((159.4,50,125,75,172,109,184,68), fill=fill, outline=\"black\")\n return img.rotate(degrees)\n\n def draw_corner(fill, degrees):\n width = 250\n height = 250\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n draw.polygon((125,25,90.6,50,125,75,159,50), fill=fill, outline=\"black\")\n return img.rotate(degrees)\n \n width = 250\n height = 250\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n\n if list(face) in pointyFaces:\n order = (0,1,2,3,4,7,10,9,8,5,6)\n else:\n order = (0,1,2,5,10,9,8,7,6,3,4)\n \n face = [face[i] for i in order]\n\n corners = list(face[:-1:2])\n edges = list(face[1:-1:2])\n\n draw.regular_polygon((125,125,50), 5, fill=colors[face[-1]], outline=\"black\")\n\n degrees = 0\n for i in corners:\n img.paste(draw_corner(colors[i], degrees), (0,0), mask=draw_corner(colors[i], degrees))\n degrees -= 72\n \n degrees = 0\n for i in edges:\n img.paste(draw_edge(colors[i], degrees), (0,0), mask=draw_edge(colors[i], degrees))\n degrees -= 72\n\n return img.rotate(rDegrees)\n\n\n width = 1050\n height = 600\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0))\n img.paste(draw_face(wh, 72), (200,200), mask=draw_face(wh, 72))\n img.paste(draw_face(gr, 36), (200,360), mask=draw_face(gr, 36))\n img.paste(draw_face(pu, -36), (46,250), mask=draw_face(pu, -36))\n img.paste(draw_face(re, 108), (354,250), mask=draw_face(re, 108))\n img.paste(draw_face(bl, 180), (295,70), mask=draw_face(bl, 180))\n 
img.paste(draw_face(ye, -108), (105,70), mask=draw_face(ye, -108))\n\n img.paste(draw_face(lb, 288), (757,380), mask=draw_face(lb, 288))\n img.paste(draw_face(be, 216), (567,380), mask=draw_face(be, 216))\n img.paste(draw_face(pi, 144), (508,200), mask=draw_face(pi, 144))\n img.paste(draw_face(lg, 72), (662,90), mask=draw_face(lg, 72))\n img.paste(draw_face(ro, 0), (816,200), mask=draw_face(ro, 0))\n img.paste(draw_face(ra, 252), (662,250), mask=draw_face(ra, 252))\n\n img = img.crop((60, 100, width, height))\n \n return img" } ]
18
geosolutions-it/lamma
https://github.com/geosolutions-it/lamma
441c7158deb61b3de8a6a9ad4cbcae095a9f7466
08770c148096f9ae9fbf1861b979b786133d3eec
287617c8ef82861cc5cd8ca08e4ae2a2774c31dc
refs/heads/master
2023-03-31T11:28:24.631378
2014-03-05T08:07:44
2014-03-05T08:07:44
5,764,336
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4145936965942383, "alphanum_fraction": 0.49419569969177246, "avg_line_length": 23.1200008392334, "blob_id": "6f959597b58f60cc6b763f1480d41d960890fed0", "content_id": "06589bd3d28ac85438b365f06c589739d4beb304", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 63, "num_lines": 25, "path": "/GEOBATCH_CONFIG_DIR/python/calc.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Carlo Cancellieri\n\ndef calculateRGB(case, values):\n if (case == 'airmass'):\n return airMass(values)\n else:\n return None\n \ndef airMass(values):\n data1 = values[0]\n data2 = values[1]\n data3 = values[2]\n data4 = values[3]\n \n red = data1 - data2\n green = data3 - data4\n blue = data1\n \n byte_red = 255 * ((red - (-25)) / (0 - (-25))) ** 1 / 1\n byte_green = 255 * ((green - (-40)) / (5 - (-40))) ** 1 / 1\n byte_blue = 255 * ((blue - (243)) / (208 - (243))) ** 1 / 1\n \n return [byte_red, byte_green, byte_blue]\n" }, { "alpha_fraction": 0.721847414970398, "alphanum_fraction": 0.7306694388389587, "avg_line_length": 25.397260665893555, "blob_id": "229a98a3ee38dc0130da825835ecbef5981fdc1e", "content_id": "bbbadf7ddab62b29663c3d03aac30b23465fe67c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1927, "license_type": "no_license", "max_line_length": 83, "num_lines": 73, "path": "/geobatch/netcdf2geotiff/src/main/java/it/geosolutions/geobatch/metocs/netcdf2geotiff/grib1/MetocsImageMosaicDictionary.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\n * GeoBatch - Open Source geospatial batch processing system\n * http://geobatch.codehaus.org/\n * Copyright (C) 2007-2008-2009 GeoSolutions S.A.S.\n * http://www.geo-solutions.it\n *\n * GPLv3 + Classpath exception\n *\n * This program is free software: you can 
redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http://www.gnu.org/licenses/>.\n */\n\npackage it.geosolutions.geobatch.metocs.netcdf2geotiff.grib1;\n\nimport java.util.Map;\n\nimport it.geosolutions.geobatch.metocs.netcdf2geotiff.checker.MetocsBaseDictionary;\n\n/**\n * \n * @author Carlo Cancellieri - [email protected]\n *\n */\npublic class MetocsImageMosaicDictionary extends MetocsBaseDictionary {\n\n\tpublic MetocsImageMosaicDictionary(\n\t\t\tMap<String, Map<String, String>> dictionary) {\n\t\tsuper(dictionary);\n\t}\n\t\n\t//a list of styles comma separated\n\tpublic final static String STYLES_KEY=\"styles\";\n\t\n\t// the default style\n\tpublic final static String DEFAULT_STYLE_KEY=\"defaultStyle\";\n\n\t// TODO\n//\tbackgroundValue\n//\toutputTransparentColor\n//\tinputTransparentColor\n//\tallowMultithreading\n//\tuseJaiImageRead\n//\ttileSizeH\n//\ttileSizeW\n//\t\n////\t<!--NONE, REPROJECT_TO_DECLARED, FORCE_DECLARED-->\n//\tprojectionPolicy\n////\t<!-- METADATA -->\n//\ttimeRegex\n//\ttimeDimEnabled\n////\t<!-- LIST, CONTINUOUS_INTERVAL, DISCRETE_INTERVAL -->\n//\ttimePresentationMode\n//\t\n//\televationRegex\n//\televDimEnabled\n//\televationPresentationMode\n//\n//\tdatastorePropertiesPath\n\t\n\t\n\t\n\n}\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.699999988079071, "avg_line_length": 14.714285850524902, "blob_id": "7da30a1c479a0726f940d07534caf0b8f56e7371", "content_id": 
"09d06d1e804c76cc14ead7cd8591188fcb5d4451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 110, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/GEOBATCH_CONFIG_DIR/commons/prova.sh", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nFILEOUT=\"/opt/GEOBATCH_CONFIG_DIR/commons/prova.log\"\ndate > $FILEOUT\necho $1 >> $FILEOUT\n\nexit 0\n" }, { "alpha_fraction": 0.6598034501075745, "alphanum_fraction": 0.6635833978652954, "avg_line_length": 28.31812286376953, "blob_id": "2291bbafd0169fc5045755218dd3fb1df86487a7", "content_id": "0716ae830b89a84fa0c6d4f30874a72992ba5a3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 22487, "license_type": "no_license", "max_line_length": 150, "num_lines": 767, "path": "/geobatch/netcdf2geotiff/src/main/java/it/geosolutions/geobatch/metocs/netcdf2geotiff/checker/NetcdfChecker.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\n * GeoBatch - Open Source geospatial batch processing system\n * http://geobatch.codehaus.org/\n * Copyright (C) 2007-2008-2009 GeoSolutions S.A.S.\n * http://www.geo-solutions.it\n *\n * GPLv3 + Classpath exception\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n */\npackage it.geosolutions.geobatch.metocs.netcdf2geotiff.checker;\n\nimport it.geosolutions.geobatch.metocs.netcdf2geotiff.Netcdf2GeotiffOutput;\nimport it.geosolutions.geobatch.metocs.utils.converter.ConverterManager;\nimport it.geosolutions.geobatch.metocs.utils.io.METOCSActionsIOUtils;\nimport it.geosolutions.tools.commons.time.TimeParser;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.text.ParseException;\nimport java.text.SimpleDateFormat;\nimport java.util.Calendar;\nimport java.util.Date;\nimport java.util.GregorianCalendar;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.TimeZone;\n\nimport org.geotools.geometry.GeneralEnvelope;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport ucar.ma2.Array;\nimport ucar.ma2.InvalidRangeException;\nimport ucar.ma2.Range;\nimport ucar.ma2.Section;\nimport ucar.nc2.Attribute;\nimport ucar.nc2.Group;\nimport ucar.nc2.NetcdfFile;\nimport ucar.nc2.Variable;\nimport ucar.units.Converter;\nimport ucar.units.UnitDBException;\n\n/**\n * \n * @author Carlo Cancellieri - [email protected]\n *\n */\npublic abstract class NetcdfChecker<OutputType> extends Netcdf2GeotiffOutput<OutputType>{\n\t\n\t/**\n\t * method to override to initialize internal members\n\t * @return\n\t */\n\tpublic abstract boolean initVar(final Variable var);\n\tpublic abstract Converter getConverter();\n\tpublic abstract Number getFillValue(); \n\tpublic abstract String buildName(final Variable var, final int ... 
coords);// TODO change this...\n\n\tpublic abstract int getLonSize();\n\tpublic abstract int getLatSize();\n public abstract int getTimeSize();\n\tpublic abstract int getZetaSize();\n\tpublic abstract GeneralEnvelope getEnvelope();\n\n /**\n * calculate general envelop\n * \n * @param lat\n * @param lon\n * \n * @return\n */\n public GeneralEnvelope getVarEnvelope(final Array lat, final Array lon) {\n final double[] bbox = METOCSActionsIOUtils.computeExtrema(lat, lon);\n // building Envelope\n final GeneralEnvelope envelope = new GeneralEnvelope(METOCSActionsIOUtils.WGS_84);\n if (!halfPixelExtend) {\n envelope.setRange(0, bbox[0], bbox[2]);\n envelope.setRange(1, bbox[1], bbox[3]);\n } else {\n // Getting coordinates Span\n final double diffX = bbox[2] - bbox[0];\n final double diffY = Math.abs(bbox[1] - bbox[3]);\n\n // Getting number of grid points\n final double nX = lon.getSize();\n final double nY = lat.getSize();\n\n // Getting half pixel size\n final double halfX = (diffX / ((nX - 1) * 2));\n final double halfY = (diffY / ((nY - 1) * 2));\n\n //Updating the envelope\n envelope.setRange(0, bbox[0] - halfX, bbox[2] + halfX);\n envelope.setRange(1, bbox[1] - halfY, bbox[3] + halfY);\n }\n return envelope;\n }\n\n /**\n * @return the dictionary\n * @note the dictionary may never be null!\n */\n\tpublic MetocsBaseDictionary getDictionary() {\n\t\treturn dictionary;\n\t}\n\t\n\t/**\n * @return the converterManager\n * @note the converterManager may never be null!\n */\n\tpublic ConverterManager getConverterManager(){\n\t\treturn converterManager;\n\t}\n\n\t// //////////////////////////////\n\t// GLOBAL Attributes\n\t// //////////////////////////////\n\tpublic String getRunTime() {\n\t\tfinal Date date = getRunTimeDate();\n\t\tif (date != null) {\n\t\t\treturn formatDate(date);\n\t\t}\n\t\treturn null;\n\t}\n\n protected String formatDate(Date date) {\n try {\n synchronized (sdf) {\n return sdf.format(date);\n }\n } catch (Exception e) {\n if 
(LOGGER.isWarnEnabled())\n LOGGER.warn(\n \"Unable to format the RunTime date attribute string: \" + e.getMessage(), e);\n }\n return null;\n }\n /**\n\t * Return the runtime date as Date object.<br>\n\t * - First the dictionary is scanned searching at root level for the\n\t * attribute matching the RUNTIME_KEY<br>\n\t * - If it is found the netcdf file is queried and the runtime attribute is\n\t * read (else null is returned)<br>\n\t * - The attribute is parsed using the gb-tools TimeParser to get the date\n\t * from the String<br>\n\t * - If success the date is returned (else null is returned)<br>\n\t * \n\t * @see TimeParser\n\t * @see MetocsBaseDictionary.RUNTIME_KEY\n\t * @note you may override this method if the runtime attribute is not an\n\t * iso801 compatible String.\n\t * @return\n\t */\n\tpublic Date getRunTimeDate() {\n\t\tfinal Attribute attr = getGlobalAttrByKey(MetocsBaseDictionary.RUNTIME_KEY);\n\t\tif (attr != null) {\n\t\t\tfinal TimeParser parser = new TimeParser();\n\t\t\tfinal List<Date> dates;\n\t\t\ttry {\n\t\t\t\tdates = parser.parse(attr.getStringValue());\n\t\t\t\tif (dates.size() > 0) {\n\t\t\t\t\treturn dates.get(0);\n\t\t\t\t}\n\t\t\t} catch (ParseException e) {\n\t\t\t\tif (LOGGER.isWarnEnabled())\n\t\t\t\t\tLOGGER.warn(\n\t\t\t\t\t\t\t\"Unable to parse the string \"\n\t\t\t\t\t\t\t\t\t+ attr.getStringValue() + \" as date: \"\n\t\t\t\t\t\t\t\t\t+ e.getMessage(), e);\n\t\t\t}\n\t\t}\n\t\treturn null;\n\t}\n\n\t/**\n\t * Search into the dictionary the TAU value for the ROOT node\n\t * \n\t * @return\n\t */\n\tpublic String getTAU() {\n\t\tfinal Attribute attr = getGlobalAttrByKey(MetocsBaseDictionary.TAU_KEY);\n\t\tif (attr != null)\n\t\t\treturn attr.getStringValue();\n\t\telse\n\t\t\treturn null;\n\t}\n\n\t/**\n\t * Search into the dictionary the NOTADA value for the ROOT node\n\t * \n\t * @return\n\t */\n\tpublic String getNoData() {\n\t\tfinal Attribute attr = getGlobalAttrByKey(MetocsBaseDictionary.NODATA_KEY);\n\t\tif (attr != 
null)\n\t\t\treturn attr.getStringValue();\n\t\telse\n\t\t\treturn null;\n\t}\n\n\t/**\n\t * return the variable name with prefix and suffix in the form:<br>\n\t * \n\t * PREFIXVariableNameSUFFIX<br>\n\t * <br>\n\t * The SUFFIX and the PREFIX variables can be defined into the dictionary as\n\t * root (global) or section (per variable) attributes.\n\t * \n\t * @note you may override this method to change the VARIABLE (DIRECTORY)\n\t * NAMING CONVENTION since this name is used to build the output\n\t * variable directory name.\n\t * \n\t * @param var\n\t * the variable to use to getName\n\t * @return a string representing the name in the form described above\n\t */\n\tpublic String getVarName(final Variable var) {\n\t\treturn getPrefix(var.getShortName()) + var.getShortName()\n\t\t\t\t+ getSuffix(var.getShortName());\n\t}\n\n\t/**\n\t * \n\t * @param varName\n\t * @return\n\t */\n\tpublic String getPrefix(final String varName) {\n\t\tfinal String prefix = getDictionary().getValueFromDictionary(varName,\n\t\t\t\tMetocsBaseDictionary.PREFIX_KEY);\n\t\tif (prefix != null)\n\t\t\treturn prefix;\n\t\telse\n\t\t\treturn \"\";\n\t}\n\n\t/**\n\t * \n\t * @param varName\n\t * @return\n\t */\n\tpublic String getSuffix(final String varName) {\n\t\tfinal String suffix = getDictionary().getValueFromDictionary(varName,\n\t\t\t\tMetocsBaseDictionary.SUFFIX_KEY);\n\t\tif (suffix != null)\n\t\t\treturn suffix;\n\t\telse\n\t\t\treturn \"\";\n\t}\n\n\t/**\n\t * Try to search for the latitude dimension for the passed variable. 
The\n\t * used name for the latitude dimension will be read from the dictionary.\n\t * Return null if no latitude dimension is assigned.\n\t * \n\t * @see NetcdfChecker.getDimVar()\n\t * @param var\n\t * the variable to query for the latitude variable\n\t * @return the variable representing the latitude dimension or null\n\t */\n\tpublic Variable getLat(final Variable var) {\n\t\treturn getDimVar(var, MetocsBaseDictionary.LATITUDE_KEY);\n\t}\n\n\t/**\n\t * Try to search for the longitude dimension for the passed variable. The\n\t * used name for the longitude dimension will be read from the dictionary.\n\t * Return null if no longitude dimension is assigned.\n\t * \n\t * @see NetcdfChecker.getDimVar()\n\t * @param var\n\t * the variable to query for the longitude variable\n\t * @return the variable representing the longitude dimension or null\n\t */\n\tpublic Variable getLon(final Variable var) {\n\t\treturn getDimVar(var, MetocsBaseDictionary.LONGITUDE_KEY);\n\t}\n\n\t/**\n\t * Try to search for the elevation/depth ('Z') dimension for the passed\n\t * variable. The used name for the elevation/depth ('Z') dimension will be\n\t * read from the dictionary. Return null if no elevation/depth ('Z')\n\t * dimension is assigned.\n\t * \n\t * @see NetcdfChecker.getDimVar()\n\t * @param var\n\t * the variable to query for the elevation/depth ('Z') variable\n\t * @return the variable representing the elevation/depth ('Z') dimension or\n\t * null\n\t */\n\tpublic Variable getZ(final Variable var) {\n\t\treturn getDimVar(var, MetocsBaseDictionary.Z_KEY);\n\t}\n\n\t/**\n\t * Try to search for the time dimension for the passed variable. The used\n\t * name for the time dimension will be read from the dictionary. 
Return null\n\t * if no time dimension is assigned.\n\t * \n\t * @see NetcdfChecker.getDimVar()\n\t * @param var\n\t * the variable to query for the time variable\n\t * @return the variable representing the time dimension or null\n\t */\n\tpublic Variable getTime(final Variable var) {\n\t\treturn getDimVar(var, MetocsBaseDictionary.TIME_KEY);\n\t}\n\n\t/**\n\t * Return the _fillValue attribute (as Number) for the specified variable\n\t * \n\t * @param time\n\t * @return the _fillValue attribute (as Number), can return null.\n\t */\n\tpublic Number getFillValue(final Variable var) {\n\t\tfinal Attribute attr = getVarAttrByKey(var,\n\t\t\t\tMetocsBaseDictionary.FILLVALUE_KEY);\n\t\tif (attr != null)\n\t\t\treturn attr.getNumericValue();\n\t\telse\n\t\t\treturn null;\n\t}\n\t\n\t/**\n\t * \n\t * Try to parse the dictionary to get the specified alias for a unit to convert to\n\t * if it is found try to get the converter from the UnitDB using the variable unit\n\t * as starting unit and the alias found into the dictionary as unit to convert to. 
\n\t * \n\t * @param var the variable to try to convert\n\t * @return the converter or null (if no converter is found)\n\t */\n\tpublic Converter getVarConversion(final Variable var) {\n\t\t// try to parse conversion value from the dictionary\n\t\tfinal String conversionVal = getDictionary().getValueFromDictionary(var.getShortName()\n\t\t\t\t, MetocsBaseDictionary.CONVERSION_KEY);\n\t\tif (conversionVal!=null)\n\t\t\treturn converterManager.getConverter(var.getUnitsString(), conversionVal);\n\t\telse{\n\t\t\tif (LOGGER.isInfoEnabled())\n\t\t\t\tLOGGER.info(\"No converter specified for this variable\");\n\t\t\treturn null;\n\t\t}\n\t\t\n\t}\n\n\t/**\n\t * Return the t^th time in millisecs\n\t * \n\t * @note you may override this method if the time array is of String type\n\t * \n\t * @param startDate\n\t * the BaseTime in milliseconds\n\t * @param timeVar\n\t * the variable representing the time vector\n\t * @param t\n\t * the t^th time to calculate\n\t * @return\n\t */\n\tpublic long getTimeInstant(final long startDate, final Array time,\n\t\t\tfinal int t, final Long conversion) {\n\t\t\n\t\tlong timeValue = time.getLong(t);\n\t\tif (timeValue < 0) {\n\t\t\tif (LOGGER.isWarnEnabled())\n\t\t\t\tLOGGER.warn(\"The time TAU is: \" + timeValue);\n\t\t} else {\n\t\t\ttimeValue = startDate+getDeltaTime(time, t, conversion);\n\t\t}\n\t\t\n\t\tfinal Calendar roundedTimeInstant = new GregorianCalendar();//UTC_TZ);\n\t\troundedTimeInstant.setTimeInMillis(timeValue);\n\n\t\treturn roundedTimeInstant.getTimeInMillis();\n\n\t}\n\n\t/**\n\t * Try to parse the dictionary to get the value of the time unit conversion\n\t * (from unknown milliseconds) can return null.\n\t * \n\t * @param timeVarName\n\t * @return a Long representing the conversion constant to convert the time\n\t * of the passed timeVarName variable into milliseconds\n\t */\n\tpublic Long getTimeConversion(final String timeVarName) {\n\t\t// try to parse conversion value from the dictionary\n\t\tfinal String 
conversionVal = getDictionary().getValueFromDictionary(\n\t\t\t\ttimeVarName, MetocsBaseDictionary.TIME_CONVERSION_KEY);\n\t\tif (conversionVal != null) {\n\t\t\ttry {\n\t\t\t\treturn Long.parseLong(conversionVal);\n\t\t\t} catch (NumberFormatException e) {\n\t\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\t\tLOGGER.error(\"Unable to parse conversion value for the variable: \\'\"\n\t\t\t\t\t\t\t+ timeVarName + \"\\'.\");\n\t\t\t}\n\t\t}\n\t\treturn null;\n\t}\n\n\t/**\n\t * Dim return a long representing the time in milliseconds from the BaseTime\n\t * for the specified variable\n\t * \n\t * @param ncFileIn\n\t * @param time\n\t * @param index\n\t * @return\n\t * @throws InvalidRangeException\n\t * @throws IOException\n\t */\n\tpublic long getTime(final long baseTime, final Variable time,\n\t\t\tfinal int index) throws InvalidRangeException, IOException {\n\t\tfinal Section section = new Section(time.getShape());\n\t\tsection.setRange(0, new Range(index, index));\n\t\treturn (baseTime + time.read(section).getLong(0));\n\t}\n\n\t/**\n\t * return a long representing the BaseTime in milliseconds for the specified\n\t * variable\n\t * \n\t * @param ncFileIn\n\t * @param time\n\t * @param index\n\t * @return\n\t */\n\tpublic long getBaseTime(final Variable time) {\n\t\tfinal Attribute attr = getVarAttrByKey(time,\n\t\t\t\tMetocsBaseDictionary.BASETIME_KEY);\n\t\tif (attr != null) {\n\t\t\ttry {\n\t\t\t\tfinal TimeParser parser = new TimeParser();\n\t\t\t\tfinal List<Date> dates;\n\t\t\t\tdates = parser.parse(attr.getStringValue());\n\t\t\t\tif (dates.size() > 0) {\n\t\t\t\t\treturn dates.get(0).getTime();\n\t\t\t\t}\n\t\t\t} catch (ParseException e) {\n\t\t\t\tif (LOGGER.isWarnEnabled())\n\t\t\t\t\tLOGGER.warn(\"Unable to parse the \" + attr.getName()\n\t\t\t\t\t\t\t+ \" attribute string: \" + e.getMessage(), e);\n\t\t\t}\n\t\t}\n\t\treturn -1;\n\t}\n\n\t// //////////////////////////////\n\t// Protected\n\t// //////////////////////////////\n\n\tprivate final long DELTA = 
3600000;\n\n\tprotected final Logger LOGGER;\n\n\tprivate final NetcdfFile ncFileIn;\n\n\tprivate final MetocsBaseDictionary dictionary;\n\n\tprivate final ConverterManager converterManager;\n\t\n\tprivate final SimpleDateFormat sdf;\n\n /**\n * halfPixelExtend specify whether we need to apply an half pixel extension. As an instance, for underlying GRIB files, NetCDF lon/lat coordinates\n * are the coordinates of the grid points Therefore we need to add an half pixel to both side of both axes to get the full extent.\n */\n private boolean halfPixelExtend;\n\n public boolean isHalfPixelExtend() {\n return halfPixelExtend;\n }\n\n public void setHalfPixelExtend(boolean halfPixelExtend) {\n this.halfPixelExtend = halfPixelExtend;\n }\n\n private static final TimeZone UTC_TZ = TimeZone.getTimeZone(\"UTC\");\n\n\tprivate static final String TIME_FORMAT = \"yyyyMMdd'T'HHmmssSSS'Z'\"; // TODO\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t// move\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t// into\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t// the\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t// dictionary\n\tprotected final SimpleDateFormat getSimpleDateFormat(){\n\t\treturn sdf;\n\t}\n\t\n\tprotected String getTimeFormat(){\n\t\treturn TIME_FORMAT;\n\t}\n\t\n\tprotected TimeZone getTimeZone(){\n\t\treturn UTC_TZ;\n\t}\n\n\t/**\n\t * Constructor\n\t * \n\t * @param ncFileIn\n\t * @throws UnitDBException \n\t */\n\tprotected NetcdfChecker(final NetcdfFile ncFileIn,\n\t\t\tfinal File dictionaryFile, final NetcdfCheckerSPI spi) throws Exception {\n\t\tLOGGER = LoggerFactory.getLogger(spi.getClass());\n\t\t\n\t\tdictionary = spi.readDictionary(dictionaryFile);\n\t\tsdf = new SimpleDateFormat(getTimeFormat());\n\t\tsdf.setTimeZone(getTimeZone());\n\t\t\n\t\tconverterManager=new ConverterManager();\n\t\tif (LOGGER.isDebugEnabled()){\n\t\t\tLOGGER.debug(converterManager.toString());\n\t\t}\n\t\t\n\t\t// load global conversion alias\n\t\tfinal Map<String, String> 
alias_section=dictionary.getVal(MetocsBaseDictionary.CONVERSION_SECTION_KEY);\n\t\tif (alias_section!=null){\n\t\t\tconverterManager.addAlias(alias_section);\n\t\t}\n\t\t\n\t\tif (ncFileIn != null)\n\t\t\tthis.ncFileIn = ncFileIn;\n\t\telse\n\t\t\tthrow new NullPointerException(\n\t\t\t\t\t\"Unable to initialize a checker using a null NetcdfFile as input.\");\n\n\t\t\n\t}\n\n\t/**\n\t * return the global attribute matching the attrKey into the dictionary or\n\t * null.\n\t * \n\t * @param var\n\t * @param attrKey\n\t * @return\n\t */\n\tprotected Attribute getGlobalAttrByKey(final String attrKey) {\n\t\tfinal String name = getDictionary().getValueFromRootDictionary(attrKey);\n\t\tif (name == null)\n\t\t\treturn null;\n\t\telse\n\t\t\treturn getGlobalAttr(name);\n\t}\n\n\t/**\n\t * return the attribute associated to the passed variable and matching the\n\t * attrKey into the dictionary or null.\n\t * \n\t * @param var\n\t * @param attrKey\n\t * @return\n\t */\n\tprotected Attribute getVarAttrByKey(final Variable var, final String attrKey) {\n\n\t\tfinal String name = getDictionary().getValueFromDictionary(\n\t\t\t\tvar.getShortName(), attrKey);\n\t\tif (name == null) {\n\t\t\treturn null;\n\t\t}\n\n\t\treturn var.findAttributeIgnoreCase(name);\n\t}\n\n\t/**\n\t * Get the variable representing a dimension for the passed variable ('var')\n\t * trying to parse the vocabulary using the 'VarName' as key.<br>\n\t * Search is performed first at var level, if no result is found a ROOT\n\t * (dictionary) level search is performed. 
If no result is found (into\n\t * dictionary or into the dataset) null is returned.\n\t * \n\t * @param var\n\t * @param VarNameKey\n\t * @return\n\t */\n\tprotected Variable getDimVar(final Variable var, final String varName) {\n\t\tfinal String name = getDictionary().getValueFromDictionary(\n\t\t\t\tvar.getShortName(), varName);\n\t\tif (name == null) {\n\t\t\treturn null;\n\t\t}\n\t\t// verify if the found variable is effectively a dimension for the\n\t\t// variable 'var'\n\t\tfinal int dimIndex = var.findDimensionIndex(name);\n\t\tif (dimIndex < 0) {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\"Unable to find \" + name\n\t\t\t\t\t\t+ \" dimension into the variable named: \"\n\t\t\t\t\t\t+ var.getShortName());\n\t\t\treturn null;\n\t\t} else {\n\t\t\treturn ncFileIn.findVariable(var.getParentGroup().getName() + \"/\" + name);\n\t\t}\n\t}\n\n\t/**\n\t * Search a Variable (by name) into the dataset using the name found into\n\t * the ROOT node of the dictionary.\n\t * \n\t * @param dimName\n\t * @return\n\t */\n\tprotected Variable getVarByKey(final String varNameKey) {\n\n\t\tfinal String name = getDictionary().getValueFromRootDictionary(\n\t\t\t\tvarNameKey);\n\t\tif (name == null) {\n\t\t\treturn null;\n\t\t}\n\n\t\treturn findVariable(name);\n\t}\n\n\t// /////////////////////////\n\t// PRIVATE\n\t// /////////////////////////\n\n\t/**\n\t * Return the DELTA (TAU) in milliseconds for the specified time variable at\n\t * the specified index\n\t * \n\t * @param time\n\t * @param t\n\t * \n\t * @return\n\t */\n\tprivate long getDeltaTime(final Array time, final int t,\n\t\t\tfinal Long conversion) {\n\t\tfinal long deltaValue;\n//\t\tif (t > 0) {\n//\t\t\tdeltaValue = Math.abs(time.getLong(t - 1) - time.getLong(t));\n//\t\t} else\n\t\t\tdeltaValue = time.getLong(t);\n\n\t\tif (conversion == null) {\n\t\t\t// apply standard conversion from hour\n\t\t\treturn DELTA * deltaValue;\n\t\t} else {\n\t\t\treturn conversion * deltaValue; // from hour to 
millisec(s)\n\t\t}\n\t}\n\n\t// hide empty constructors\n\tprivate NetcdfChecker() {\n\t\tncFileIn = null;\n\t\tconverterManager=null;\n\t\tLOGGER = null;\n\t\tdictionary = null;\n\t\tsdf = null;\n\t};\n\n\tprivate Attribute getVarAttr(final Variable var, final String name) {\n\t\tfinal Attribute attr = var.findAttribute(name);\n\t\tif (attr != null)\n\t\t\treturn attr;\n\t\telse {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\"Unable to find attribute named: \\'\" + name\n\t\t\t\t\t\t+ \"\\' associated to the variable: \\'\" + var.getName()\n\t\t\t\t\t\t+ \"\\'.\");\n\t\t\treturn null;\n\t\t}\n\t}\n\n\t/**\n\t * search into the passed (opened and !null) netcdf file the attribute name\n\t * string (not null) as global attribute and return the found Attribute\n\t * object (or null if not found)\n\t * \n\t * @param ncFileIn\n\t * (must be not null and opened) the netcdf object representing\n\t * the reading dataset\n\t * @param attrNameKey\n\t * (must be not null) the name representing the attribute to\n\t * search for\n\t * @return the searched global Attribute.\n\t */\n\tprotected Attribute getGlobalAttr(final String attrName) {\n\t\tAttribute attr;\n\t\tfinal Group grp;\n\t\tif ((attr = ncFileIn.findGlobalAttribute(attrName)) != null) {\n\t\t\treturn attr;\n\t\t}\n\t\t/*\n\t\t * @note Carlo Cancellieri 16 Dec 2010 Search the global attributes as\n\t\t * global attributes or as attributes of the root group.\n\t\t */\n\t\telse if ((grp = ncFileIn.getRootGroup()) != null) {\n\t\t\tif ((attr = grp.findAttribute(attrName)) != null) {\n\t\t\t\treturn attr;\n\t\t\t} else\n\t\t\t\treturn null;\n\t\t} else {\n\t\t\tfinal String message = \"NetcdfChecker.getGlobalAttr(): Unable to find \\'\"\n\t\t\t\t\t+ attrName + \"\\' global variable in the source file\";\n\t\t\tif (LOGGER.isWarnEnabled())\n\t\t\t\tLOGGER.warn(message);\n\t\t\treturn null;\n\t\t}\n\t}\n\n\t/**\n\t * look into the dataset for a 'name' named variable\n\t * \n\t * @param name\n\t * @return\n\t 
*/\n\tprivate Variable findVariable(final String name) {\n\n\t\tfinal Variable var = ncFileIn.findVariable(name);\n\t\tfinal Group grp;\n\t\tif (var != null) {\n\t\t\treturn var;\n\t\t}\n\t\t/*\n\t\t * @note Carlo Cancellieri 16 Dec 2010 Search the global attributes as\n\t\t * global attributes or as attributes of the root group.\n\t\t */\n\t\telse if ((grp = ncFileIn.getRootGroup()) != null) {\n\t\t\t// TODO search in ROOT group and test findTopVariable()\n\t\t\treturn grp.findVariable(name);\n\t\t} else {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\"Unable to find \\'\" + name\n\t\t\t\t\t\t+ \"\\' variable into the dataset.\");\n\t\t\treturn null;\n\t\t}\n\t}\n public Date getTimeUnit(Variable timeVar) {\n Attribute att = timeVar.findAttribute(\"units\");\n if (att != null) {\n String units = att.getStringValue();\n if (units != null && units.contains(\"since \")) {\n String timeUnits = units.substring(units.indexOf(\"since \") + \"since \".length());\n final TimeParser parser = new TimeParser();\n final List<Date> dates;\n try {\n dates = parser.parse(timeUnits);\n if (dates.size() > 0) {\n return dates.get(0);\n }\n } catch (ParseException e) {\n if (LOGGER.isWarnEnabled())\n LOGGER.warn(\"Unable to parse the \" + att.getName()\n + \" attribute string: \" + e.getMessage(), e);\n }\n \n }\n }\n return null;\n \n }\n}\n" }, { "alpha_fraction": 0.4305279552936554, "alphanum_fraction": 0.46962669491767883, "avg_line_length": 25.710914611816406, "blob_id": "f0648a267ee314d9ff0cc2e675946ff7cba24278", "content_id": "e4f8a15bf99fe44eae51bac678ed9020f87c1ff0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9054, "license_type": "no_license", "max_line_length": 102, "num_lines": 339, "path": "/geobatch/lamma/src/test/resources/gst_template.js", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<#function getRuntime(root)>\n <#if root.TIME_DOMAIN??>\n <#return 
root.TIME_DOMAIN[0]>\n </#if>\n</#function>\n{\n \"about\":{\n \"abstract\":\"Consorzio LaMMA\",\n \"contact\":\"<a href='http://www.lamma.rete.toscana.it/'>http://www.lamma.rete.toscana.it/<\\/a>.\",\n \"title\":\"Dati Meteo - Consorzio LaMMA\"\n },\n \"defaultSourceType\":\"gxp_wmssource\",\n \"isLoadedFromConfigFile\":true,\n \"map\":{\n \"center\":[\n \"1250000.0000000\",\n \"5370000.0000000\"\n ],\n \"layers\":[\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"Aerial\",\n \"selected\":false,\n \"source\":\"bing\",\n \"title\":\"Bing Aerial\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"mapnik\",\n \"selected\":false,\n \"source\":\"osm\",\n \"title\":\"Open Street Map\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"osm\",\n \"selected\":false,\n \"source\":\"mapquest\",\n \"title\":\"MapQuest OpenStreetMap\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"ROADMAP\",\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Roadmap\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"HYBRID\",\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Hybrid\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"TERRAIN\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Terrain\",\n \"visibility\":true\n }<#list event as root>,{\n \"format\":\"image/png\",\n \"group\":\"${root.WORKSPACE}_${getRuntime(root)?substring(0, 19)}\",<#-- FIXED -->\n \"name\":\"${root.WORKSPACE}:${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[],\n \"title\":\"${root.WORKSPACE} ${root.LAYERNAME}\", <#-- FIXED -->\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from 
GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n <#if root.ELEVATION_DOMAIN??>\"elevation\":\"${root.ELEVATION_DOMAIN[0]}\",</#if><#-- FIXED -->\n \"time\":\"${getRuntime(root)?substring(0, 19)}\"\n }</#list>\n ],\n \"maxExtent\":[\"-20037508.34\",\"-20037508.34\",\"20037508.34\",\"20037508.34\"],\n \"maxResolution\": 156543.0339,\n \"projection\":\"EPSG:900913\",\n \"displayProjection\":\"EPSG:900913\",\n \"units\":\"m\",\n \"zoom\":2\n },\n \"mapTitle\":\"View\",\n \"modified\":false,\n \"printService\":\"http://demo1.geo-solutions.it/geoserver/pdf/\",\n \"proxy\":\"proxy/?url=\",\n \"renderToTab\":\"appTabs\",\n \"sources\":{\n <#if event[0]??>\"${event[0].WORKSPACE}\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA ${event[0].WORKSPACE}\",\n \"layerBaseParams\":{\n \"TILED\":true,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://159.213.57.108/geoserver/ows?namespace=${event[0].WORKSPACE}\"\n }</#if>,\n \"LaMMA-Stazioni\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Stazioni\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://192.168.0.46:8080/geoserver/ows?namespace=lamma_staz\"\n },\n \"LaMMA-NDVI\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA NDVI\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://159.213.57.104/geoserver_clima/ows?namespace=NDVI\"\n },\n \"LaMMA-Geologia-BD_Sottosuolo\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Geologia-BD_Sottosuolo\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://159.213.57.104/geoserver_clima/ows?namespace=Geologia-BD_Sottosuolo\"\n },\n \"LaMMA-Geologia-CriticitaGeologiche\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Geologia-CriticitaGeologiche\",\n \"layerBaseParams\":{\n 
\"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://159.213.57.104/geoserver_clima/ows?namespace=Geologia-CriticitaGeologiche\"\n },\n \"LaMMA-Geologia-Risorse\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Geologia-Risorse\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://159.213.57.104/geoserver_clima/ows?namespace=Geologia-Risorse\"\n },\n \"bing\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_bingsource\"\n },\n \"google\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_googlesource\"\n },\n \"mapquest\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_mapquestsource\"\n },\n \"ol\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_olsource\"\n },\n \"osm\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_osmsource\"\n }\n },\n \"tools\":[\n {\n \"outputConfig\":{\n \"id\":\"layertree\"\n },\n \"outputTarget\":\"tree\",\n \"ptype\":\"gxp_layertree\"\n },\n {\n \"legendConfig\":{\n \"defaults\":{\n \"baseParams\":{\n \"FORMAT\":\"image/png\",\n \"HEIGHT\":12,\n \"LEGEND_OPTIONS\":\"forceLabels:on;fontSize:10\",\n \"WIDTH\":12\n },\n \"style\":\"padding:5px\"\n },\n \"legendPanelId\":\"legendPanel\"\n },\n \"outputConfig\":{\n \"autoScroll\":true,\n \"title\":\"Show Legend\"\n },\n \"outputTarget\":\"legend\",\n \"ptype\":\"gxp_legend\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_addlayers\",\n \"upload\":true\n },\n {\n \"actionTarget\":[\n \"tree.tbar\",\n \"layertree.contextMenu\"\n ],\n \"ptype\":\"gxp_removelayer\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_removeoverlays\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_addgroup\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_removegroup\"\n },\n {\n \"actionTarget\":[\n \"tree.tbar\"\n ],\n \"ptype\":\"gxp_groupproperties\"\n },\n {\n \"actionTarget\":[\n \"tree.tbar\",\n \"layertree.contextMenu\"\n ],\n 
\"ptype\":\"gxp_layerproperties\"\n },\n {\n \"actionTarget\":{\n \"index\":0,\n \"target\":\"layertree.contextMenu\"\n },\n \"ptype\":\"gxp_zoomtolayerextent\"\n },\n {\n \"actionTarget\":[\n \"layertree.contextMenu\"\n ],\n \"ptype\":\"gxp_geonetworksearch\"\n },\n {\n \"actionTarget\":{\n \"index\":15,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_navigation\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":7,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_wmsgetfeatureinfo\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":12,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_measure\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":20,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoom\"\n },\n {\n \"actionTarget\":{\n \"index\":24,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoombox\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":22,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_navigationhistory\"\n },\n {\n \"actionTarget\":{\n \"index\":26,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoomtoextent\"\n },\n {\n \"actionTarget\":{\n \"index\":40,\n \"target\":\"paneltbar\"\n },\n \"needsAuthorization\":true,\n \"ptype\":\"gxp_saveDefaultContext\"\n },\n {\n \"actionTarget\":{\n \"index\":4,\n \"target\":\"paneltbar\"\n },\n \"customParams\":{\n \"outputFilename\":\"fdh-print\"\n },\n \"legendPanelId\":\"legendPanel\",\n \"printService\":\"http://demo1.geo-solutions.it/geoserver/pdf/\",\n \"ptype\":\"gxp_print\"\n }, {\n \"ptype\":\"gxp_playback\"\n }\n ],\n \"viewerTools\":[\n\n ],\n \"xmlJsonTranslateService\":\"http://demo1.geo-solutions.it/xmlJsonTranslate/\"\n}" }, { "alpha_fraction": 0.5448874235153198, "alphanum_fraction": 0.5506694912910461, "avg_line_length": 36.357975006103516, "blob_id": "d960933ad30ac72208022a21162e94bd86495342", "content_id": "8b9b3d90d4dab363ac51c43ac79e4c005527c9f8", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 19716, "license_type": "no_license", "max_line_length": 120, "num_lines": 514, "path": "/geobatch/netcdf2geotiff/src/main/java/it/geosolutions/geobatch/metocs/utils/io/Utilities.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\r\n * GeoBatch - Open Source geospatial batch processing system\r\n * http://code.google.com/p/geobatch/\r\n * Copyright (C) 2007-2008-2009 GeoSolutions S.A.S.\r\n * http://www.geo-solutions.it\r\n *\r\n * GPLv3 + Classpath exception\r\n *\r\n * This program is free software: you can redistribute it and/or modify\r\n * it under the terms of the GNU General Public License as published by\r\n * the Free Software Foundation, either version 3 of the License, or\r\n * (at your option) any later version.\r\n *\r\n * This program is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n * GNU General Public License for more details.\r\n *\r\n * You should have received a copy of the GNU General Public License\r\n * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\r\n */\r\npackage it.geosolutions.geobatch.metocs.utils.io;\r\n\r\nimport java.awt.Color;\r\nimport java.awt.image.ColorModel;\r\nimport java.awt.image.DataBuffer;\r\nimport java.awt.image.SampleModel;\r\nimport java.awt.image.WritableRaster;\r\nimport java.io.File;\r\nimport java.io.IOException;\r\nimport java.io.OutputStream;\r\nimport java.text.SimpleDateFormat;\r\nimport java.util.Date;\r\nimport java.util.HashMap;\r\nimport java.util.Map;\r\n\r\nimport javax.measure.unit.Unit;\r\nimport javax.media.jai.PlanarImage;\r\nimport javax.media.jai.RasterFactory;\r\nimport javax.media.jai.TiledImage;\r\n\r\nimport org.geotools.coverage.Category;\r\nimport org.geotools.coverage.CoverageFactoryFinder;\r\nimport org.geotools.coverage.GridSampleDimension;\r\nimport org.geotools.coverage.grid.GridCoverage2D;\r\nimport org.geotools.coverage.grid.GridCoverageFactory;\r\nimport org.geotools.coverage.grid.io.AbstractGridCoverageWriter;\r\nimport org.geotools.coverage.grid.io.AbstractGridFormat;\r\nimport org.geotools.coverage.grid.io.imageio.GeoToolsWriteParams;\r\nimport org.geotools.factory.Hints;\r\nimport org.geotools.gce.geotiff.GeoTiffFormat;\r\nimport org.geotools.gce.geotiff.GeoTiffWriteParams;\r\nimport org.geotools.gce.geotiff.GeoTiffWriter;\r\nimport org.geotools.resources.i18n.Vocabulary;\r\nimport org.geotools.resources.i18n.VocabularyKeys;\r\nimport org.geotools.util.NumberRange;\r\nimport org.opengis.coverage.grid.GridCoverage;\r\nimport org.opengis.geometry.Envelope;\r\nimport org.opengis.parameter.GeneralParameterValue;\r\nimport org.opengis.parameter.ParameterValueGroup;\r\nimport org.slf4j.Logger;\r\nimport org.slf4j.LoggerFactory;\r\n\r\nimport ucar.ma2.DataType;\r\n\r\n//import com.ice.tar.TarEntry;\r\n//import com.ice.tar.TarEntryEnumerator;\r\n//import com.ice.tar.TarInputStream;\r\n\r\n/**\r\n * @author Alessio\r\n * @author Carlo Cancellieri - [email protected]\r\n */\r\npublic class Utilities {\r\n /**\r\n * 
GeoTIFF Writer Default Params\r\n */\r\n public final static String DEFAULT_GEOSERVER_VERSION = \"2.x\";\r\n\r\n protected final static Logger LOGGER = LoggerFactory.getLogger(Utilities.class.toString());\r\n\r\n private Utilities() {\r\n\r\n }\r\n\r\n /**\r\n * \r\n * @param outDir\r\n * @param fileName\r\n * @param varName\r\n * @param userRaster\r\n * @param envelope\r\n * @param compressionType\r\n * @param compressionRatio\r\n * @param tileSize\r\n * @return\r\n * @throws IOException\r\n * @throws IllegalArgumentException\r\n */\r\n public static File storeCoverageAsGeoTIFF(final File outDir, final String coverageName,\r\n final CharSequence varName, WritableRaster userRaster, final double inNoData,\r\n Envelope envelope, final String compressionType, final double compressionRatio,\r\n final int tileSize) throws IllegalArgumentException, IOException {\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // PREPARING A WRITE\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n if (LOGGER.isTraceEnabled())\r\n LOGGER.trace(\"Writing down the file in the decoded directory...\");\r\n final GeoTiffFormat wformat = new GeoTiffFormat();\r\n final GeoTiffWriteParams wp = new GeoTiffWriteParams();\r\n if (!Double.isNaN(compressionRatio)) {\r\n wp.setCompressionMode(GeoTiffWriteParams.MODE_EXPLICIT);\r\n wp.setCompressionType(compressionType);\r\n wp.setCompressionQuality((float) compressionRatio);\r\n }\r\n wp.setTilingMode(GeoToolsWriteParams.MODE_EXPLICIT);\r\n wp.setTiling(tileSize, tileSize);\r\n final ParameterValueGroup wparams = wformat.getWriteParameters();\r\n wparams.parameter(AbstractGridFormat.GEOTOOLS_WRITE_PARAMS.getName().toString()).setValue(wp);\r\n\r\n // keep original name\r\n final File outFile = new File(outDir, coverageName.toString() + \".tiff\");\r\n\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // ACQUIRING A WRITER AND PERFORMING A 
WRITE\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n final Hints hints = new Hints(Hints.TILE_ENCODING, \"raw\");\r\n final GridCoverageFactory factory = CoverageFactoryFinder.getGridCoverageFactory(hints);\r\n\r\n final SampleModel iSampleModel = userRaster.getSampleModel();\r\n final ColorModel iColorModel = PlanarImage.createColorModel(iSampleModel);\r\n TiledImage image = new TiledImage(0, 0, userRaster.getWidth(), userRaster.getHeight(), 0,\r\n 0, iSampleModel, iColorModel);\r\n image.setData(userRaster);\r\n\r\n Unit<?> uom = null;\r\n final Category nan;\r\n final Category values;\r\n if (Double.isNaN(inNoData)) {\r\n nan = new Category(Vocabulary.formatInternational(VocabularyKeys.NODATA), new Color(0,\r\n 0, 0, 0), 0);\r\n values = new Category(\"values\", new Color[] { new Color(255, 0, 0, 0) }, NumberRange\r\n .create(1, 255), NumberRange.create(0, 9000));\r\n\r\n } else {\r\n nan = new Category(Vocabulary.formatInternational(VocabularyKeys.NODATA),\r\n new Color[] { new Color(0, 0, 0, 0) }, NumberRange.create(0, 0), NumberRange\r\n .create(inNoData == 0 ? -0.000001 : inNoData, inNoData == 0 ? -0.000001 : inNoData));\r\n values = new Category(\"values\", new Color[] { new Color(255, 0, 0, 0) }, NumberRange\r\n .create(1, 255), NumberRange.create(inNoData + Math.abs(inNoData == 0 ? -0.000001 : inNoData) * 0.1,\r\n inNoData + Math.abs(inNoData == 0 ? 
-0.000001 : inNoData) * 10));\r\n\r\n }\r\n\r\n // ///////////////////////////////////////////////////////////////////\r\n //\r\n // Sample dimension\r\n //\r\n //\r\n // ///////////////////////////////////////////////////////////////////\r\n final GridSampleDimension band = new GridSampleDimension(coverageName, new Category[] {\r\n nan, values }, uom).geophysics(true);\r\n final Map<String, Double> properties = new HashMap<String, Double>();\r\n properties.put(\"GC_NODATA\", new Double(inNoData));\r\n\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // Coverage\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n GridCoverage coverage = null;\r\n if (iColorModel != null)\r\n coverage = factory.create(varName, image, envelope, new GridSampleDimension[] { band },\r\n null, properties);\r\n else\r\n coverage = factory.create(varName, userRaster, envelope,\r\n new GridSampleDimension[] { band });\r\n\r\n final AbstractGridCoverageWriter writer = (AbstractGridCoverageWriter) new GeoTiffWriter(\r\n outFile);\r\n writer.write(coverage, (GeneralParameterValue[]) wparams.values().toArray(\r\n new GeneralParameterValue[1]));\r\n\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // PERFORMING FINAL CLEAN UP AFTER THE WRITE PROCESS\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n if (writer!=null)\r\n \twriter.dispose();\r\n\r\n return outFile;\r\n }\r\n\r\n /**\r\n * \r\n * @param outDir\r\n * @param fileName\r\n * @param varName\r\n * @param userRaster\r\n * @param envelope\r\n * @param compressionType\r\n * @param compressionRatio\r\n * @param tileSize\r\n * @return\r\n * @throws IOException\r\n * @throws IllegalArgumentException\r\n */\r\n public static File storeCoverageAsGeoTIFF(final File outDir, final String fileName,\r\n final GridCoverage2D coverage, final String compressionType,\r\n final double compressionRatio, 
final int tileSize) throws IllegalArgumentException,\r\n IOException {\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // PREPARING A WRITE\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n if (LOGGER.isInfoEnabled())\r\n LOGGER.info(\"Writing down the file in the decoded directory...\");\r\n final GeoTiffFormat wformat = new GeoTiffFormat();\r\n final GeoTiffWriteParams wp = new GeoTiffWriteParams();\r\n if (!Double.isNaN(compressionRatio)) {\r\n wp.setCompressionMode(GeoTiffWriteParams.MODE_EXPLICIT);\r\n wp.setCompressionType(compressionType);\r\n wp.setCompressionQuality((float) compressionRatio);\r\n }\r\n wp.setTilingMode(GeoToolsWriteParams.MODE_EXPLICIT);\r\n wp.setTiling(tileSize, tileSize);\r\n final ParameterValueGroup wparams = wformat.getWriteParameters();\r\n wparams.parameter(AbstractGridFormat.GEOTOOLS_WRITE_PARAMS.getName().toString()).setValue(\r\n wp);\r\n\r\n // keep original name\r\n final File outFile = new File(outDir, fileName.toString() + \".tiff\");\r\n\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // ACQUIRING A WRITER AND PERFORMING A WRITE\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n // final Hints hints = new Hints(Hints.TILE_ENCODING, \"raw\");\r\n final AbstractGridCoverageWriter writer = (AbstractGridCoverageWriter) new GeoTiffWriter(\r\n outFile);\r\n writer.write(coverage, (GeneralParameterValue[]) wparams.values().toArray(\r\n new GeneralParameterValue[1]));\r\n\r\n // /////////////////////////////////////////////////////////////////////\r\n //\r\n // PERFORMING FINAL CLEAN UP AFTER THE WRITE PROCESS\r\n //\r\n // /////////////////////////////////////////////////////////////////////\r\n writer.dispose();\r\n\r\n return outFile;\r\n }\r\n\r\n /**\r\n * \r\n * @param tempFile\r\n * @return\r\n * @throws IOException\r\n \r\n public static File decompress(final String 
prefix, final File inputFile, final File tempFile)\r\n throws IOException {\r\n final File tmpDestDir = createTodayPrefixedDirectory(prefix, new File(tempFile.getParent()));\r\n\r\n String ext = FilenameUtils.getExtension(inputFile.getName());\r\n\r\n if (ext.equalsIgnoreCase(\"tar\")) {\r\n final TarInputStream stream = new TarInputStream(new FileInputStream(inputFile));\r\n final TarEntryEnumerator entryEnum = new TarEntryEnumerator(stream);\r\n\r\n if (stream == null) {\r\n throw new IOException(\"Not valid archive file type.\");\r\n }\r\n\r\n TarEntry entry;\r\n while (entryEnum.hasMoreElements()) {\r\n entry = (TarEntry) entryEnum.nextElement();\r\n final String entryName = entry.getName();\r\n\r\n if (entry.isDirectory()) {\r\n // Assume directories are stored parents first then\r\n // children.\r\n (new File(tmpDestDir, entry.getName())).mkdir();\r\n continue;\r\n }\r\n\r\n byte[] buf = new byte[(int) entry.getSize()];\r\n stream.read(buf);\r\n\r\n File newFile = new File(tmpDestDir.getAbsolutePath(), entryName);\r\n FileOutputStream fos = new FileOutputStream(newFile);\r\n try {\r\n saveCompressedStream(buf, fos, buf.length);\r\n } catch (IOException e) {\r\n stream.close();\r\n IOException ioe = new IOException(\"Not valid archive file type.\");\r\n ioe.initCause(e);\r\n throw ioe;\r\n } finally {\r\n fos.flush();\r\n fos.close();\r\n }\r\n }\r\n stream.close();\r\n\r\n } else if (ext.equalsIgnoreCase(\"zip\")) {\r\n ZipFile zipFile = new ZipFile(inputFile);\r\n\r\n Enumeration<? 
extends ZipEntry> entries = zipFile.entries();\r\n\r\n while (entries.hasMoreElements()) {\r\n ZipEntry entry = (ZipEntry) entries.nextElement();\r\n InputStream stream = zipFile.getInputStream(entry);\r\n\r\n if (entry.isDirectory()) {\r\n // Assume directories are stored parents first then\r\n // children.\r\n (new File(tmpDestDir, entry.getName())).mkdir();\r\n continue;\r\n }\r\n\r\n File newFile = new File(tmpDestDir, entry.getName());\r\n FileOutputStream fos = new FileOutputStream(newFile);\r\n try {\r\n byte[] buf = new byte[1024];\r\n int len;\r\n\r\n while ((len = stream.read(buf)) >= 0)\r\n saveCompressedStream(buf, fos, len);\r\n\r\n } catch (IOException e) {\r\n zipFile.close();\r\n IOException ioe = new IOException(\"Not valid COAMPS archive file type.\");\r\n ioe.initCause(e);\r\n throw ioe;\r\n } finally {\r\n fos.flush();\r\n fos.close();\r\n\r\n stream.close();\r\n }\r\n }\r\n zipFile.close();\r\n }\r\n\r\n return tmpDestDir;\r\n }\r\n */\r\n /**\r\n * @param len\r\n * @param stream\r\n * @param fos\r\n * @return\r\n * @throws IOException\r\n */\r\n public static void saveCompressedStream(final byte[] buffer, final OutputStream out,\r\n final int len) throws IOException {\r\n try {\r\n out.write(buffer, 0, len);\r\n\r\n } catch (Exception e) {\r\n out.flush();\r\n out.close();\r\n IOException ioe = new IOException(\"Not valid archive file type.\");\r\n ioe.initCause(e);\r\n throw ioe;\r\n }\r\n }\r\n\r\n /**\r\n * Create a subDirectory having the actual date as name, within a specified destination\r\n * directory.\r\n * \r\n * @param destDir\r\n * the destination directory where to build the \"today\" directory.\r\n * @param inputFileName\r\n * @return the created directory.\r\n */\r\n public final static File createTodayDirectory(File destDir, String inputFileName) {\r\n return createTodayDirectory(destDir, inputFileName, false);\r\n }\r\n\r\n /**\r\n * Create a subDirectory having the actual date as name, within a specified destination\r\n * 
directory.\r\n * \r\n * @param destDir\r\n * the destination directory where to build the \"today\" directory.\r\n * @param inputFileName\r\n * @return the created directory.\r\n */\r\n public final static File createTodayDirectory(File destDir, String inputFileName,\r\n final boolean withTime) {\r\n final SimpleDateFormat SDF = withTime ? new SimpleDateFormat(\"yyyy_MM_dd_hhmmssSSS\")\r\n : new SimpleDateFormat(\"yyyy_MM_dd\");\r\n final String newPath = (new StringBuffer(destDir.getAbsolutePath().trim()).append(\r\n File.separatorChar).append(SDF.format(new Date())).append(\"_\")\r\n .append(inputFileName)).toString();\r\n File dir = new File(newPath);\r\n if (!dir.exists()){\r\n if (dir.mkdirs()){\r\n return dir;\r\n }\r\n else\r\n return null;\r\n }\r\n return dir;\r\n }\r\n\r\n /**\r\n * Create a subDirectory having the actual date as name, within a specified destination\r\n * directory.\r\n * \r\n * @param prefix\r\n * @param parent\r\n * the destination directory where to build the \"today\" directory.\r\n * @return the created directory.\r\n */\r\n public static File createTodayPrefixedDirectory(final String prefix, final File parent) {\r\n final SimpleDateFormat SDF_HMS = new SimpleDateFormat(\"yyyy_MM_dd_hhmmssSSS\");\r\n final String newPath = (new StringBuffer(parent.getAbsolutePath().trim()).append(\r\n File.separatorChar).append(prefix).append(File.separatorChar).append(SDF_HMS\r\n .format(new Date()))).toString();\r\n File dir = new File(newPath);\r\n if (!dir.exists()){\r\n if (dir.mkdirs()){\r\n return dir;\r\n }\r\n else\r\n return null;\r\n }\r\n return dir;\r\n \r\n }\r\n\r\n public static int getDataType(final DataType varDataType) {\r\n int dataType = DataBuffer.TYPE_UNDEFINED;\r\n if (varDataType == DataType.FLOAT)\r\n dataType = DataBuffer.TYPE_FLOAT;\r\n else if (varDataType == DataType.DOUBLE)\r\n dataType = DataBuffer.TYPE_DOUBLE;\r\n else if (varDataType == DataType.BYTE)\r\n dataType = DataBuffer.TYPE_BYTE;\r\n else if (varDataType == 
DataType.SHORT)\r\n dataType = DataBuffer.TYPE_SHORT;\r\n else if (varDataType == DataType.INT)\r\n dataType = DataBuffer.TYPE_INT;\r\n return dataType;\r\n }\r\n\r\n public static SampleModel getSampleModel(final DataType varDataType, final int width,\r\n final int height, final int numBands) {\r\n final int dataType = Utilities.getDataType(varDataType);\r\n return RasterFactory.createBandedSampleModel(dataType, // data type\r\n width, // width\r\n height, // height\r\n numBands); // num bands\r\n }\r\n\r\n /**\r\n * @param workingDir\r\n * @param inputFileName\r\n * @return\r\n */\r\n public static File createDirectory(File workingDir, String inputFileName) {\r\n File newDir = new File(workingDir, inputFileName);\r\n if (!newDir.exists()){\r\n if (newDir.mkdirs()){\r\n return newDir;\r\n }\r\n else\r\n return null;\r\n }\r\n return newDir;\r\n }\r\n \r\n /**\r\n * Reverse the order of a String array.\r\n * @param elements\r\n */\r\n public static void reverse(String[] elements) {\r\n \tif (elements != null){\r\n \tfinal int length = elements.length;\r\n \tfinal int half = length/2;\r\n \tString temp = \"\";\r\n \tfor (int i=0;i<half;i++){\r\n \t\ttemp = elements[i];\r\n \t\telements[i] = elements[length-1-i];\r\n \t\telements[length-1-i] = temp;\r\n \t}\r\n }\r\n\t}\r\n\r\n\tpublic static String chainValues(String[] timePositions) {\r\n\t\tif (timePositions != null){\r\n\t\t\tfinal int size = timePositions.length;\r\n\t\t\tStringBuilder sb = new StringBuilder();\r\n\t\t\tint i = 0;\r\n\t\t\tfor (; i < size - 1; i++){\r\n\t\t\t\tsb.append(timePositions[i]).append(\",\");\r\n\t\t\t}\r\n\t\t\tsb.append(timePositions[i]);\r\n\t\t\treturn sb.toString();\r\n\t\t}\r\n\t\treturn null;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.8170731663703918, "avg_line_length": 81, "blob_id": "abd31ecfe82f759c1ff784c4d98562fc8772cc5f", "content_id": "88045c926a5cc813758604c6fd0ad6fc72e84761", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 82, "license_type": "no_license", "max_line_length": 81, "num_lines": 1, "path": "/README.md", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "This is an initial porting of the sourcerepo lamma/lamma_2012 branch into github.\n" }, { "alpha_fraction": 0.6393197178840637, "alphanum_fraction": 0.6508635878562927, "avg_line_length": 39.102474212646484, "blob_id": "dbc1fb9e4c84a218a34f7a06410ae82faeca6dc3", "content_id": "9f112ca7038f4c0751a8d66ec4056799ea970e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11348, "license_type": "no_license", "max_line_length": 150, "num_lines": 283, "path": "/geobatch/lamma/src/test/resources/python/utils.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from osgeo.gdalconst import GA_ReadOnly\nimport gdal\nimport osr\nuse_osgeo = False\ntry:\n from osgeo import _gdal\n use_osgeo = True\nexcept ImportError:\n import _gdal\n\ndef GetBlockSize(band):\n if (use_osgeo):\n return band.GetBlockSize()\n else:\n x = _gdal.ptrcreate('int', 0, 2)\n _gdal.GDALGetBlockSize(band._o, x, _gdal.ptradd(x, 1))\n result = (_gdal.ptrvalue(x, 0), _gdal.ptrvalue(x, 1))\n _gdal.ptrfree(x)\n return result\n\n\n \ndef printXY(ds):\n srs = osr.SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n srsLatLong = srs.CloneGeogCS()\n ct = osr.CoordinateTransformation(srs,srsLatLong)\n xyz=[]\n rows = ds.RasterYSize\n cols = ds.RasterXSize\n xyz.append(ct.TransformPoint(0,0))\n xyz.append(ct.TransformPoint(0,rows))\n xyz.append(ct.TransformPoint(cols,rows))\n xyz.append(ct.TransformPoint(cols,0))\n xyz.append(ct.TransformPoint(0,0))\n index=1;\n for point3d in xyz:\n print 'P',index,' lat: ',point3d[0],' lon: ',point3d[1]\n index=index+1\n \n# Build Spatial Reference object based on coordinate system, fetched from the\n # opened dataset\n# 
http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/tolatlong.py\ndef toLatLon(ds):\n srs = osr.SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n srsLatLong = srs.CloneGeogCS()\n ct = osr.CoordinateTransformation(srs, srsLatLong)\n rows = ds.RasterYSize\n cols = ds.RasterXSize\n (int, lat, height) = ct.TransformPoint(0, 0)\n # Report results\n# print('pixel: %g\\t\\t\\tline: %g' % (pixel, line))\n print('latitude: %fd\\t\\tlongitude: %fd' % (lat, int))\n print('latitude: %s\\t\\tlongitude: %s' % (gdal.DecToDMS(lat, 'Lat', 2), gdal.DecToDMS(int, 'Long', 2)))\n\n# http://lists.osgeo.org/pipermail/gdal-dev/2010-December/026959.html\ndef LatLon(ds):\n cols = ds.RasterXSize\n rows = ds.RasterYSize\n geomatrix=ds.GetGeoTransform()\n \n (XUL,YUL,XLR,YLR)=getBB(ds)\n (success, inv_geometrix) = gdal.InvGeoTransform(geomatrix)\n# if (success is not True):\n# print 'invGeoTransform failure'\n# return False\n xUL = inv_geometrix[0] + inv_geometrix[1] * XUL + inv_geometrix[2] * YUL\n yUL = inv_geometrix[3] + inv_geometrix[4] * XUL + inv_geometrix[5] * YUL\n xLR = inv_geometrix[0] + inv_geometrix[1] * XLR + inv_geometrix[2] * YLR\n yLR = inv_geometrix[3] + inv_geometrix[4] * XLR + inv_geometrix[5] * YLR\n print xLR,yLR,xUL,yUL\n xdif = xLR - xUL\n ydif = yUL - yLR\n print xdif,ydif\n \n# return (lon,lat)\ndef getBB(ds):\n (x, deltaX, rotationX, y, rotationY, deltaY) = ds.GetGeoTransform()\n Nx = ds.RasterXSize\n Ny = ds.RasterYSize\n lon = x + deltaX * Nx + rotationX * Ny\n lat = y + rotationY * Nx + deltaY * Ny\n print (x,y,lon,lat)\n return (x,y,lon,lat)\n\n\ndef warp(srcPath, xSize, ySize, src_geomatrix, src_srs, dst_geomatrix, dst_srs, cBlockSize, rBlockSize):\n #http://code.google.com/p/tilers-tools/source/browse/tilers_tools/gdal_tiler.py\n warp_vrt='''<VRTDataset rasterXSize=\"%(xsize)d\" rasterYSize=\"%(ysize)d\" subClass=\"VRTWarpedDataset\">\n <SRS>%(srs)s</SRS>\n %(geo_transform)s\n <VRTRasterBand dataType=\"Float32\" band=\"1\" 
subClass=\"VRTWarpedRasterBand\" />\n <BlockXSize>%(blxsize)d</BlockXSize>\n <BlockYSize>%(blysize)d</BlockYSize>\n <GDALWarpOptions>\n <!-- <WarpMemoryLimit>6.71089e+07</WarpMemoryLimit> -->\n <ResampleAlg>%(ResampleAlg)s</ResampleAlg>\n <WorkingDataType>Float32</WorkingDataType>\n <SourceDataset relativeToVRT=\"0\">%(src_path)s</SourceDataset>\n <Transformer>\n <ApproxTransformer>\n <MaxError>0.125</MaxError>\n <BaseTransformer>\n <GenImgProjTransformer>\n%(src_transform)s\n%(dst_transform)s\n <ReprojectTransformer>\n <ReprojectionTransformer>\n <SourceSRS>%(src_srs)s</SourceSRS>\n <TargetSRS>%(dst_srs)s</TargetSRS>\n </ReprojectionTransformer>\n </ReprojectTransformer>\n </GenImgProjTransformer>\n </BaseTransformer>\n </ApproxTransformer>\n </Transformer>\n <BandList>\n <BandMapping src=\"1\" dst=\"1\" />\n </BandList>\n </GDALWarpOptions>\n</VRTDataset>\n'''\n geotr_templ=' <GeoTransform> %r, %r, %r, %r, %r, %r</GeoTransform>\\n'\n warp_dst_geotr= ' <DstGeoTransform> %r, %r, %r, %r, %r, %r</DstGeoTransform>'\n warp_dst_igeotr=' <DstInvGeoTransform> %r, %r, %r, %r, %r, %r</DstInvGeoTransform>'\n warp_src_geotr= ' <SrcGeoTransform> %r, %r, %r, %r, %r, %r</SrcGeoTransform>'\n warp_src_igeotr=' <SrcInvGeoTransform> %r, %r, %r, %r, %r, %r</SrcInvGeoTransform>'\n\n# if (applyWarp):\n# for b in len(range(inBand)):\n# if (b <> hyResImage):\n # apply warp\n \n (success,inv_src_geomatrix)=gdal.InvGeoTransform(src_geomatrix)\n src_transform='%s\\n%s' % (warp_src_geotr % src_geomatrix,warp_src_igeotr % inv_src_geomatrix)\n (success,inv_dst_geomatrix)=gdal.InvGeoTransform(dst_geomatrix)\n dst_transform='%s\\n%s' % (warp_dst_geotr % dst_geomatrix,warp_dst_igeotr % inv_dst_geomatrix)\n\n geotr_txt=geotr_templ % src_geomatrix\n\n vrt_text=warp_vrt % {\n 'xsize': xSize,\n 'ysize': ySize,\n 'geo_transform': geotr_txt,\n 'srs': dst_srs,\n 'blxsize': cBlockSize,\n 'blysize': rBlockSize,\n 'ResampleAlg': 'NearestNeighbour',#GRA_NearestNeighbour\n 'src_path': srcPath,\n 'src_srs': 
src_srs,\n 'dst_srs': dst_srs,\n 'src_transform': src_transform,\n 'dst_transform': dst_transform,\n }\n \n # temp_vrt=os.path.join(self.dest,self.base+'.tmp.vrt') # auxilary VRT file\n # self.temp_files.append(temp_vrt)\n # with open(temp_vrt,'w') as f:\n # f.write(vrt_text)\n # warp base raster\n\n return vrt_text\n\n\n\n\n\n######## UNUSED\n\n # http://www.gdal.org/gdalwarper_8h.html#a4ad252bc084421b47428973a55316421\n #GDALDatasetH GDALCreateWarpedVRT ( GDALDatasetH hSrcDS,\n # int nPixels,\n # int nLines,\n # double * padfGeoTransform,\n # GDALWarpOptions * psOptions \n # ) \n #\n #Create virtual warped dataset.\n #\n #This function will create a warped virtual file representing the input image warped based on a provided transformation. \n # Output bounds and resolution are provided explicitly.\n #\n #Note that the constructed GDALDatasetH will acquire one or more references to the passed in hSrcDS.\n # Reference counting semantics on the source dataset should be honoured. That is, don't just GDALClose() \n # it unless it was opened with GDALOpenShared().\n #\n #Parameters:\n # hSrcDS The source dataset.\n # nPixels Width of the virtual warped dataset to create\n # nLines Height of the virtual warped dataset to create\n # padfGeoTransform Geotransform matrix of the virtual warped dataset to create\n # psOptions Warp options. 
Must be different from NULL.\n #\n #Returns:\n # NULL on failure, or a new virtual dataset handle on success.\n\n# http://www.gdal.org/gdalwarper_8h.html#ab5a8723d68786e7554f1ad4c0a6fa8d3\n#GDALDatasetH GDALAutoCreateWarpedVRT ( GDALDatasetH hSrcDS,\n# const char * pszSrcWKT,\n# const char * pszDstWKT,\n# GDALResampleAlg eResampleAlg,\n# double dfMaxError,\n# const GDALWarpOptions * psOptionsIn \n# ) \n#This function will create a warped virtual file representing the input image warped into the target coordinate system.\n# A GenImgProj transformation is created to accomplish any required GCP/Geotransform warp and reprojection to the target coordinate system.\n# The output virtual dataset will be \"northup\" in the target coordinate system. The GDALSuggestedWarpOutput() function is used to determine\n# the bounds and resolution of the output virtual file which should be large enough to include all the input image\n#\n#Note that the constructed GDALDatasetH will acquire one or more references to the passed in hSrcDS.\n#Reference counting semantics on the source dataset should be honoured. That is, don't just GDALClose() it unless it was opened with GDALOpenShared().\n#\n#The returned dataset will have no associated filename for itself.\n#If you want to write the virtual dataset description to a file, use the GDALSetDescription() function (or SetDescription() method) on\n#the dataset to assign a filename before it is closed.\n#Parameters:\n# hSrcDS The source dataset.\n# pszSrcWKT The coordinate system of the source image. If NULL, it will be read from the source image.\n# pszDstWKT The coordinate system to convert to. If NULL no change of coordinate system will take place.\n# eResampleAlg One of GRA_NearestNeighbour, GRA_Bilinear, GRA_Cubic or GRA_CubicSpline. 
Controls the sampling method used.\n# dfMaxError Maximum error measured in input pixels that is allowed in approximating the transformation (0.0 for exact calculations).\n# psOptionsIn Additional warp options, normally NULL.\n#Returns:\n# NULL on failure, or a new virtual dataset handle on success.\n\n\n\n\n# ds - dataset to warp\n# driver - the driver to use to write the image\n# dst_path - path to write the warped image\n# dst_wkt - The coordinate system to convert to. If NULL no change of coordinate system will take place.\ndef warpToFile(ds, driver, dst_path, dst_wkt):\n \n error_threshold = 0.125 # error threshold --> use same value as in gdalwarp\n resampling = gdal.GRA_NearestNeighbour\n \n tmp_ds = gdal.AutoCreateWarpedVRT( ds, \\\n None, # src_wkt : left to default value --> will use the one from source \\\n dst_wkt, \\\n resampling, \\\n error_threshold)\n dst_xsize = tmp_ds.RasterXSize\n dst_ysize = tmp_ds.RasterYSize\n dst_gt = tmp_ds.GetGeoTransform()\n tmp_ds = None\n \n # Desfine target SRS\n# dst_srs = osr.SpatialReference()\n# dst_srs.ImportFromWkt(ds.GetProjectionRef())\n# dst_wkt = dst_srs.ExportToWkt()\n \n # Now create the true target dataset\n dst_ds = driver.Create(dst_path, dst_xsize, dst_ysize,ds.RasterCount)\n dst_ds.SetProjection(ds.GetProjectionRef())\n dst_ds.SetGeoTransform(dst_gt)\n \n # And run the reprojection\n cbk=progress_callback\n cbk_user_data = None # value for last parameter of above warp_27_progress_callback\n \n gdal.ReprojectImage( ds, \\\n dst_ds, \\\n None, # src_wkt : left to default value --> will use the one from source \\\n None, # dst_wkt : left to default value --> will use the one from destination \\\n resampling, \\\n 0, # WarpMemoryLimit : left to default value \\\n error_threshold, \\\n cbk, # Progress callback : could be left to None or unspecified for silent progress\n cbk_user_data) # Progress callback user data\n \n # Done !\n dst_ds = None\n \n # Check that we have the same result as produced by 'gdalwarp 
-rb -t_srs EPSG:4326 ../gcore/data/byte.tif tmp/warp_27.tif'\n# ds = gdal.Open('tmp/warp_27.tif')\n# cs = ds.GetRasterBand(1).Checksum()\n# ds = None\n \ndef progress_callback(pct, message, user_data):\n #print(pct)\n return 1" }, { "alpha_fraction": 0.7875458002090454, "alphanum_fraction": 0.791208803653717, "avg_line_length": 36.71428680419922, "blob_id": "7a26eeb158eb2f3043809f295e3baa20a0540be9", "content_id": "a6192fc250eb829989a054eb44d571a74241a60a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 65, "num_lines": 7, "path": "/py_examples/produceWV.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceGrayOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"WV.tif\"\r\n\r\noutputImage = produceGrayOutputImage(inputDir, baseFileName, \"5\")\r\noutputImage.save(out_filePath)\r\n\r\n" }, { "alpha_fraction": 0.5615406036376953, "alphanum_fraction": 0.5685818791389465, "avg_line_length": 29.806941986083984, "blob_id": "08b45cf45ff5c921d12d1a035cfc4ea292b787bd", "content_id": "d898c62d87fe87b39b869082c1b21b5f24a4b082", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 14202, "license_type": "no_license", "max_line_length": 138, "num_lines": 461, "path": "/geobatch/lamma/pom.xml", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- ======================================================================= \n\tMaven Project Configuration File GeoSolutions GeoBatch Project http://geobatch.codehaus.org \n\tVersion: $Id: pom.xml 329 2011-06-14 13:00:49Z ccancellieri $ ======================================================================= -->\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" 
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n\txsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 \n http://maven.apache.org/maven-v4_0_0.xsd\">\n\t<modelVersion>4.0.0</modelVersion>\n\n <parent>\n\t <groupId>it.geosolutions.geobatch.lamma</groupId>\n\t <artifactId>gb-lamma</artifactId>\n\t <version>1.3-RC2</version>\n </parent>\n\n\t<groupId>it.geosolutions.geobatch.lamma</groupId>\n\t<artifactId>gb-lamma-utils</artifactId>\n\t<version>1.3-RC2</version>\n\n <packaging>jar</packaging>\n\n\t<name>LaMMa utils for GeoBatch flows</name>\n\t<description>GeoSolutions GeoBatch LaMMa misc.</description>\n\n\t<url>http://www.geo-solutions.it/maven_reports/gb/</url><!--<url>flowmanagers/</url> -->\n\n\t<scm>\n\t\t<connection>\n scm:svn:http://svn.geotools.org/geotools/trunk/gt/modules/<!--flowmanagers-->\n </connection>\n\t\t<url>http://svn.geotools.org/geotools/trunk/gt/modules/<!--flowmanagers--></url>\n\t</scm>\n\n\t<licenses>\n\t\t<license>\n\t\t\t<name>Lesser General Public License (LGPL)</name>\n\t\t\t<url>http://www.gnu.org/copyleft/lesser.txt</url>\n\t\t\t<distribution>repo</distribution>\n\t\t</license>\n\t</licenses>\n\n\t<distributionManagement>\n\t\t<repository>\n\t\t\t<uniqueVersion>false</uniqueVersion>\n\t\t\t<id>geosolutions</id>\n\t\t\t<url>ftp://maven.geo-solutions.it</url>\n\t\t</repository>\n\t</distributionManagement>\n\n\t<!-- Profiles set on the command-line overwrite default properties. 
-->\n\t<profiles>\n\t\t<profile>\n\t\t\t<id>extensive.tests</id>\n\t\t\t<properties>\n\t\t\t\t<extensive.tests>true</extensive.tests>\n\t\t\t</properties>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>interactive.tests</id>\n\t\t\t<properties>\n\t\t\t\t<interactive.tests>true</interactive.tests>\n\t\t\t</properties>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>site.build</id>\n\t\t\t<properties>\n\t\t\t\t<allow.test.skip>false</allow.test.skip>\n\t\t\t\t<allow.test.failure.ignore>true</allow.test.failure.ignore>\n\t\t\t\t<extensive.tests>true</extensive.tests>\n\t\t\t</properties>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>online</id>\n\t\t\t<properties>\n\t\t\t\t<online.skip.pattern>disabled</online.skip.pattern>\n\t\t\t</properties>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>stress</id>\n\t\t\t<properties>\n\t\t\t\t<stress.skip.pattern>disabled</stress.skip.pattern>\n\t\t\t</properties>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>java6</id>\n\t\t\t<activation>\n\t\t\t\t<jdk>1.6</jdk>\n\t\t\t</activation>\n\t\t\t<reporting>\n\t\t\t\t<plugins>\n\t\t\t\t\t<plugin>\n\t\t\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t\t\t<artifactId>maven-javadoc-plugin</artifactId>\n\t\t\t\t\t\t<version>2.7</version>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<source>1.6</source>\n\t\t\t\t\t\t</configuration>\n\t\t\t\t\t</plugin>\n\t\t\t\t</plugins>\n\t\t\t</reporting>\n\t\t</profile>\n\t\t<profile>\n\t\t\t<id>site.geosolutions</id>\n\t\t\t<distributionManagement>\n\t\t\t\t<site>\n\t\t\t\t\t<id>site-geosolutions</id>\n\t\t\t\t\t<name>Web site for Maven reports</name>\n\t\t\t\t\t<url>scp://www.geo-solutions.it/var/www/geo-solutions.it/maven/reports/gb</url>\n\t\t\t\t</site>\n\t\t\t</distributionManagement>\n\t\t</profile>\n\t\t<!-- deploy libs 
-->\n\t\t<profile>\n\t\t\t<id>deployJar</id>\n\t\t\t<activation>\n\t\t\t\t<property>\n\t\t\t\t\t<name>deployJar</name>\n\t\t\t\t</property>\n\t\t\t</activation>\n\t\t\t<modules>\n\t\t\t\t<module>deploy</module>\n\t\t\t</modules>\n\t\t</profile>\n\t</profiles>\n\n\t<!-- =========================================================== -->\n\t<!-- =========================================================== -->\n\t<dependencies>\n\n <!-- =========================================================== -->\n <!-- Specific -->\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch.metocs</groupId>\n\t\t\t<artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n\t\t</dependency>\n\n <!-- =========================================================== -->\n <!-- GeoBatch -->\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch</groupId>\n\t\t\t<artifactId>gb-action-imagemosaic</artifactId>\n\t\t</dependency>\n\n <dependency>\n <groupId>it.geosolutions</groupId>\n <artifactId>geonetwork-manager</artifactId>\n </dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch</groupId>\n\t\t\t<artifactId>gb-action-scripting</artifactId>\n\t\t</dependency>\n\n\t\t<dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geonetwork</artifactId>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch</groupId>\n \t\t\t<artifactId>gb-action-geotiff</artifactId>\n\t\t</dependency>\n\n <!-- =========================================================== -->\n <!-- GB Tools -->\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.tools</groupId>\n\t\t\t<artifactId>tools-io</artifactId>\n\t\t</dependency>\n\t\t\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.tools</groupId>\n\t\t\t<artifactId>tools-freemarker</artifactId>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.tools</groupId>\n\t\t\t<artifactId>tools-ant</artifactId>\n <version>1.1.2</version>\n\t\t</dependency>\n\n\n\t\t<!-- 
=========================================================== -->\n\t\t<!-- GeoStore -->\n\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geostore</groupId>\n\t\t\t<artifactId>geostore-model</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geostore</groupId>\n\t\t\t<artifactId>geostore-services-api</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geostore</groupId>\n\t\t\t<artifactId>geostore-rest-api</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geostore</groupId>\n\t\t\t<artifactId>geostore-rest-client</artifactId>\n\t\t</dependency>\n\n\t\t<!-- =========================================================== -->\n\t\t<!-- Test stuff -->\n\n <dependency>\n <groupId>org.geotools</groupId>\n <artifactId>gt-sample-data</artifactId>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>junit</groupId>\n <artifactId>junit</artifactId>\n <version>4.8.2</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.easymock</groupId>\n <artifactId>easymock</artifactId>\n <version>2.3</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.easymock</groupId>\n <artifactId>easymockclassextension</artifactId>\n <version>2.3</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>com.mockrunner</groupId>\n <artifactId>mockrunner</artifactId>\n <version>0.3.1</version>\n <scope>test</scope>\n </dependency>\n\n <!-- ===LOG4J==================================================== -->\n <dependency>\n <groupId>log4j</groupId>\n <artifactId>log4j</artifactId>\n <version>1.2.16</version>\n <scope>test</scope>\n </dependency>\n\n </dependencies>\n\n\t<build>\n\t\t<plugins>\n\t\t\t<!-- ======================================================= -->\n\t\t\t<!-- Compilation. 
-->\n\t\t\t<!-- ======================================================= -->\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-compiler-plugin</artifactId>\n\t\t\t\t<version>2.0.2</version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<source>1.6</source>\n\t\t\t\t\t<!-- The -source argument for the Java compiler. -->\n\t\t\t\t\t<target>1.6</target>\n\t\t\t\t\t<!-- The -target argument for the Java compiler. -->\n\t\t\t\t\t<debug>true</debug>\n\t\t\t\t\t<!-- Whether to include debugging information. -->\n\t\t\t\t\t<encoding>UTF-8</encoding>\n\t\t\t\t\t<!-- The -encoding argument for the Java compiler. -->\n\t\t\t\t</configuration>\n\t\t\t</plugin>\n\t\t\t<!-- ======================================================= -->\n\t\t\t<!-- Tests. -->\n\t\t\t<!-- ======================================================= -->\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-surefire-plugin</artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<includes>\n\t\t\t\t\t\t<include>**/*Test.java</include>\n\t\t\t\t\t</includes>\n\t\t\t\t\t<!-- <excludes> <exclude>${online.skip.pattern}</exclude> <exclude>${stress.skip.pattern}</exclude> \n\t\t\t\t\t\t<exclude>${test.exclude.pattern}</exclude> </excludes> -->\n\t\t\t\t\t<!--argLine>-Xmx${test.maxHeapSize} -Djava.awt.headless=${java.awt.headless}</argLine -->\n\t\t\t\t\t<!-- Ignores test failure only if we are generating a -->\n\t\t\t\t\t<!-- report for publication on the web site. See the -->\n\t\t\t\t\t<!-- profiles section at the begining of this pom.xml file. -->\n\t\t\t\t\t<testFailureIgnore>\n\t\t\t\t\t\t${allow.test.failure.ignore}\n </testFailureIgnore>\n\t\t\t\t\t<!-- The two following options have the opposite value of what we would \n\t\t\t\t\t\tlike. They are that way because they don't seem to work as expected with \n\t\t\t\t\t\tSurefire 2.3. TODO: Try again when Surefire 2.4 will be available. 
-->\n\t\t\t\t\t<!-- Option to print summary of test suites or just print the test cases \n\t\t\t\t\t\tthat has errors. -->\n\t\t\t\t\t<printSummary>true</printSummary>\n\t\t\t\t\t<!-- Redirect the unit test standard output to a file. -->\n\t\t\t\t\t<redirectTestOutputToFile>false</redirectTestOutputToFile>\n\t\t\t\t</configuration>\n\t\t\t</plugin>\n\t\t\t<!-- code coverage -->\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.codehaus.mojo</groupId>\n\t\t\t\t<artifactId>cobertura-maven-plugin</artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>clean</goal>\n\t\t\t\t\t\t</goals>\n\t\t\t\t\t</execution>\n\t\t\t\t</executions>\n\t\t\t</plugin>\n\t\t\t<!-- eclipse ide integration -->\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-eclipse-plugin</artifactId>\n\t\t\t\t<version>2.5</version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<additionalProjectnatures>\n\t\t\t\t\t\t<projectnature>org.springframework.ide.eclipse.core.springnature</projectnature>\n\t\t\t\t\t</additionalProjectnatures>\n\t\t\t\t</configuration>\n\t\t\t</plugin>\n\t\t\t<!-- ======================================================= -->\n\t\t\t<!-- JAR packaging. 
-->\n\t\t\t<!-- ======================================================= -->\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-jar-plugin</artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<archive>\n\t\t\t\t\t\t<manifest>\n\t\t\t\t\t\t\t<addClasspath>true</addClasspath>\n\t\t\t\t\t\t</manifest>\n\t\t\t\t\t</archive>\n\t\t\t\t</configuration>\n\t\t\t\t<executions>\n\t\t\t\t\t<!-- Creates a jar containing only settings files === -->\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>packFlowSettings</id>\n\t\t\t\t\t\t<phase>package</phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar</goal>\n\t\t\t\t\t\t</goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>flowsettings</classifier>\n\t\t\t\t\t\t\t<includes>\n\t\t\t\t\t\t\t\t<include>settings/**</include>\n\t\t\t\t\t\t\t</includes>\n\t\t\t\t\t\t</configuration>\n\t\t\t\t\t</execution>\n\t\t\t\t</executions>\n\t\t\t</plugin>\n\t\t\t<!-- ======================================================= -->\n\t\t\t<!-- Source packaging. 
-->\n\t\t\t<!-- ======================================================= -->\n\t\t\t<plugin>\n\t\t\t\t<inherited>true</inherited>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-source-plugin</artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<attach>false</attach>\n\t\t\t\t</configuration>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>attach-sources</id>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar</goal>\n\t\t\t\t\t\t</goals>\n\t\t\t\t\t</execution>\n\t\t\t\t</executions>\n\t\t\t</plugin>\n\t\t\t<!-- versioning -->\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-release-plugin</artifactId>\n\t\t\t\t<version>2.2.2</version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<tagNameFormat>v@{project.version}</tagNameFormat>\n\t\t\t\t</configuration>\n\t\t\t</plugin>\n\t\t</plugins>\n\t\t<!-- EXTENSIONS -->\n\t\t<extensions>\n\t\t\t<extension>\n\t\t\t\t<groupId>org.apache.maven.wagon</groupId>\n\t\t\t\t<artifactId>wagon-ftp</artifactId>\n\t\t\t\t<version>1.0-beta-7</version>\n\t\t\t</extension>\n\t\t</extensions>\n\t</build>\n\t<!-- ================================================================== -->\n\t<!-- Repositories. This is where Maven looks for dependencies. The -->\n\t<!-- Maven repository is implicit and doesn't need to be specified. 
-->\n\t<!-- ================================================================== -->\n\t<repositories>\n\t\t<repository>\n\t\t\t<id>geosolutions</id>\n\t\t\t<name>GeoSolutions Repository</name>\n\t\t\t<url>http://maven.geo-solutions.it</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<id>osgeo</id>\n\t\t\t<name>OsGEO Repository</name>\n\t\t\t<url>http://download.osgeo.org/webdav/geotools/</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<id>java.net</id>\n\t\t\t<name>java.net Repository</name>\n\t\t\t<url>http://download.java.net/maven/2/</url>\n\t\t</repository>\n\t\t<!-- camel -->\n\t\t<repository>\n\t\t\t<id>camel.internal.maven.repository</id>\n\t\t\t<name>Camel internal Maven Repo</name>\n\t\t\t<url>http://svn.apache.org/repos/asf/camel/m2-repo</url>\n\t\t</repository>\n\t\t<!-- TOO LONG TIMEOUT <repository> <id>JBoss</id> <url>http://repository.jboss.com/maven2</url> \n\t\t\t</repository> -->\n\t\t<repository>\n\t\t\t<id>Hibernate Spatial repo</id>\n\t\t\t<url>http://www.hibernatespatial.org/repository</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<snapshots>\n\t\t\t\t<enabled>true</enabled>\n\t\t\t</snapshots>\n\t\t\t<id>opengeo</id>\n\t\t\t<name>OpenGeo Maven Repository</name>\n\t\t\t<url>http://repo.opengeo.org</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<id>maven-restlet</id>\n\t\t\t<name>Public online Restlet repository</name>\n\t\t\t<url>http://maven.restlet.org</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<id>official maven 1</id>\n\t\t\t<name>Public online maven repository</name>\n\t\t\t<url>http://repo1.maven.org/maven2</url>\n\t\t</repository>\n\t\t<repository>\n\t\t\t<id>official maven 2</id>\n\t\t\t<name>Public online maven repository</name>\n\t\t\t<url>http://repo2.maven.org/maven2</url>\n\t\t</repository>\n\t</repositories>\n\t<!-- =========================================================== -->\n\t<!-- Plugin repositories. -->\n\t<!-- This is where Maven looks for plugin dependencies. 
-->\n\t<!-- =========================================================== -->\n\t<pluginRepositories>\n\t\t<pluginRepository>\n\t\t\t<id>codehaus-snapshot-plugins</id>\n\t\t\t<name>codehaus-shapshot-plugins</name>\n\t\t\t<url>http://snapshots.repository.codehaus.org/</url>\n\t\t\t<snapshots>\n\t\t\t\t<enabled>true</enabled>\n\t\t\t</snapshots>\n\t\t\t<releases>\n\t\t\t\t<enabled>false</enabled>\n\t\t\t</releases>\n\t\t</pluginRepository>\n\t</pluginRepositories>\n</project>\n" }, { "alpha_fraction": 0.748251736164093, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 38.85714340209961, "blob_id": "9a59648ebbc2575f99783dcf07dcc5beee5f5c89", "content_id": "21e0337473112241b5252d57c0fc1003c3a5d423", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 80, "num_lines": 7, "path": "/py_examples/produceCVS.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceRGBOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"CVS.tif\"\r\n\r\noutputImage = produceRGBOutputImage(inputDir, baseFileName, \"5-6\", \"4-9\", \"3-1\")\r\noutputImage.save(out_filePath)\r\n" }, { "alpha_fraction": 0.6132451891899109, "alphanum_fraction": 0.6287103891372681, "avg_line_length": 37.88557052612305, "blob_id": "fae8f3ab70fece8e57b1d84b979357cb80cad93f", "content_id": "d6eaa1ea5289afc70a4e27268c6034f36549666f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8018, "license_type": "no_license", "max_line_length": 110, "num_lines": 201, "path": "/py_examples/LammaUtils.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from optparse import OptionParser\r\nimport Image\r\nimport ImageChops\r\nimport ImageOps\r\nimport os\r\nimport sys\r\n\r\n# 
---------------------------------\r\n# Channels suffix definitions\r\n# ---------------------------------\r\nCHANNELS = {\r\n 1 : lambda: \"VIS006_01\",\r\n 2 : lambda: \"VIS008_02\",\r\n 3 : lambda: \"IR_016_03\",\r\n 4 : lambda: \"IR_039_04\",\r\n 5 : lambda: \"WV_062_05\",\r\n 6 : lambda: \"WV_073_06\",\r\n 7 : lambda: \"IR_087_07\",\r\n 8 : lambda: \"IR_097_08\",\r\n 9 : lambda: \"IR_108_09\",\r\n 10 : lambda: \"IR_120_10\",\r\n 11 : lambda: \"IR_134_11\",\r\n 12 : lambda: \"HRV_12\",\r\n}\r\n\r\nDEFAULT_DATA_EXTENSION = \".tif\"\r\nDEFAULT_CONTRAST_PERCENTAGE = 5\r\nDEFAULT_REPROCESS = False\r\n\r\ncontrast = DEFAULT_CONTRAST_PERCENTAGE\r\nreprocess = DEFAULT_REPROCESS \r\n\r\n# Rescale an image to byte values and then apply autocontrast using a\r\n# specific percentage\r\ndef processInputImage(im):\r\n extrema = im.getextrema()\r\n return processInputImageExtrema(im, extrema)\r\n\r\ndef processInputImageExtrema(im, extrema):\r\n \r\n dataRange = extrema[1] - extrema[0]\r\n invRange = extrema[0] - extrema[1]\r\n scale = 255 / dataRange\r\n offset = (255*extrema[0]) / (invRange)\r\n # Rescale the image on the interval 0 - 255\r\n image2 = Image.eval(im, lambda i: i * scale + offset)\r\n \r\n # Convert the image to Byte\r\n im2 = image2.convert(\"L\")\r\n \r\n # Autocontrast the image using the specified threshold\r\n im3 = ImageOps.autocontrast(im2, contrast)\r\n return im3\r\n\r\ndef parseFileLocations():\r\n parser = OptionParser()\r\n parser.add_option(\"-i\", \"--inputDir\", dest=\"input\",\r\n help=\"Input folder containing meteo data\", metavar=\"FILE\")\r\n parser.add_option(\"-o\", \"--outputDir\", dest=\"output\",\r\n help=\"Write the output file to that folder\", metavar=\"FILE\")\r\n parser.add_option(\"-c\", \"--contrastOutliers\", dest=\"contrast\", default = DEFAULT_CONTRAST_PERCENTAGE,\r\n help=\"Specify the outliers percentage to be removed on the auto-contrast op\")\r\n parser.add_option(\"-v\", action=\"store_true\", dest=\"verbose\", default 
= False,\r\n help=\"Set this flag for logging\")\r\n parser.add_option(\"-r\", \"--reprocess\", action=\"store_true\", dest=\"reprocess\", default = DEFAULT_REPROCESS,\r\n help=\"Set this flag for rescaling again the resulting channel operations before merging\")\r\n \r\n \r\n (options, args) = parser.parse_args()\r\n if options.input is None:\r\n print (\"Input meteo data folder is missing. use -h for the help\")\r\n sys.exit(0)\r\n elif options.output is None:\r\n print (\"Output folder isn't specified. use -h for the help\")\r\n sys.exit(0)\r\n verbose = options.verbose\r\n contrast = options.contrast\r\n reprocess = options.reprocess\r\n\r\n inputDir = options.input\r\n\r\n if (not os.path.isdir(inputDir)):\r\n print (\"Specified Input path is not a folder. use -h for the help\")\r\n sys.exit(0)\r\n dirList = os.listdir(inputDir)\r\n\r\n out_path = options.output\r\n baseName = os.path.basename(dirList[0])\r\n \r\n index = 0\r\n length = len(baseName)\r\n for i in range (0,3):\r\n index = baseName.find('_',index + 1,length)\r\n \r\n baseFileName = baseName[0:index + 1]\r\n if verbose:\r\n print \"baseFileName:\" + baseFileName\r\n\r\n#---------------------------------\r\n# Preparing the output folder\r\n#---------------------------------\r\n if (not os.path.isdir(out_path)):\r\n os.mkdir(out_path)\r\n out_filePath = os.path.join(out_path, baseFileName)\r\n outputDir = out_path + os.sep\r\n if (not os.path.isdir(outputDir)):\r\n os.mkdir(outputDir)\r\n return inputDir, out_filePath, baseFileName\r\n \r\n \r\ndef getChannelSuffix(channelNumber):\r\n return CHANNELS.get(channelNumber)()\r\n\r\ndef getInputFileName (inputDir, baseFileName, channel):\r\n return inputDir + os.sep + baseFileName + getChannelSuffix(channel) + DEFAULT_DATA_EXTENSION\r\n\r\ndef produceRGBOutputImage (inputDir, baseFileName, r, g, b):\r\n rBand = produceBand(inputDir, baseFileName, r) \r\n if (g == r):\r\n gBand = rBand\r\n else: \r\n gBand = produceBand(inputDir, baseFileName, g)\r\n if 
(b == r):\r\n bBand = rBand\r\n elif (b == g):\r\n bBand = gBand\r\n else:\r\n bBand = produceBand(inputDir, baseFileName, b)\r\n return Image.merge(\"RGB\", (rBand, gBand, bBand))\r\n\r\n#def produceBand (inputDir, baseFileName, channel):\r\n# if '-' in channel:\r\n# channels = channel.split('-')\r\n# firstChannel = channels[0].strip()\r\n# secondChannel = channels[1].strip()\r\n# imageA = Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel)))\r\n# imageB = Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel)))\r\n# extremaA = imageA.getextrema()\r\n# extremaB = imageB.getextrema()\r\n# extrema = (min(extremaA[0],extremaB[0]),max(extremaA[0],extremaB[0]))\r\n# a = processInputImageExtrema(imageA, extrema)\r\n# b = processInputImageExtrema(imageB, extrema)\r\n# difference = ImageChops.subtract(a, b)\r\n# if reprocess:\r\n# return processInputImage(difference)\r\n# else:\r\n# return difference\r\n# elif '+' in channel:\r\n# channels = channel.split('+')\r\n# firstChannel = channels[0].strip()\r\n# secondChannel = channels[1].strip()\r\n# a = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel))))\r\n# b = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel))))\r\n# add = ImageChops.add(a, b)\r\n# if reprocess:\r\n# return processInputImage(add)\r\n# else:\r\n# return add\r\n# elif '>' in channel:\r\n# channels = channel.split('>')\r\n# firstChannel = channels[0].strip()\r\n# secondChannel = channels[1].strip()\r\n# a = (Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel))))\r\n# hResImage = Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel)))\r\n# hResImageSize = hResImage.size\r\n# resizedImage = a.resize((hResImageSize[0],hResImageSize[1]), Image.BILINEAR)\r\n# return processInputImage(resizedImage)\r\n# else:\r\n# return processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(channel))))\r\n\r\ndef 
produceBand (inputDir, baseFileName, channel):\r\n if '-' in channel:\r\n channels = channel.split('-')\r\n firstChannel = channels[0].strip()\r\n secondChannel = channels[1].strip()\r\n a = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel))))\r\n b = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel))))\r\n difference = ImageChops.subtract(a, b)\r\n return processInputImage(difference)\r\n elif '+' in channel:\r\n channels = channel.split('+')\r\n firstChannel = channels[0].strip()\r\n secondChannel = channels[1].strip()\r\n a = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel))))\r\n b = processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel))))\r\n add = ImageChops.add(a, b)\r\n return processInputImage(add)\r\n elif '>' in channel:\r\n channels = channel.split('>')\r\n firstChannel = channels[0].strip()\r\n secondChannel = channels[1].strip()\r\n a = (Image.open(getInputFileName(inputDir, baseFileName, int(firstChannel))))\r\n hResImage = Image.open(getInputFileName(inputDir, baseFileName, int(secondChannel)))\r\n hResImageSize = hResImage.size\r\n resizedImage = a.resize((hResImageSize[0],hResImageSize[1]), Image.BILINEAR)\r\n return processInputImage(resizedImage)\r\n else:\r\n return processInputImage(Image.open(getInputFileName(inputDir, baseFileName, int(channel))))\r\n\r\ndef produceGrayOutputImage (inputDir, baseFileName, channel):\r\n return produceBand(inputDir, baseFileName, channel) \r\n" }, { "alpha_fraction": 0.4111490249633789, "alphanum_fraction": 0.4234357178211212, "avg_line_length": 37.79645919799805, "blob_id": "1fde5b69e6dafbeecb1e71175916a3836430799a", "content_id": "7b5c1537a3bf8dcf6e5bdbf0971556bb11b4a253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4395, "license_type": "no_license", "max_line_length": 81, "num_lines": 113, 
"path": "/GEOBATCH_CONFIG_DIR/commons/gst_radar_layer_template.js", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": " <#-- NO MATCHES == NO CONTOUR -->\n <#if ( LAYERNAME?matches('.*PIPPO.*') )>\n <#if ELEVATION_DOMAIN?? >\n <#list ELEVATION_DOMAIN as ELEVATION >\n {\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}_${ELEVATION}\"],\n \"title\":\"${LAYERNAME}_${ELEVATION}\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n }\n </#list><#-- list ELEVATION -->\n <#else><#-- ELSE NO ELEVATION_DOMAIN -->\n ,{\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}\"],\n \"title\":\"${LAYERNAME}\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n }\n </#if><#-- if ELEVATION_DOMAIN -->\n <#else><#-- else if MATCHES -->\n <#if ELEVATION_DOMAIN?? 
>\n <#list ELEVATION_DOMAIN as ELEVATION >\n ,{\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}_${ELEVATION}\"],\n \"title\":\"${LAYERNAME} ${ELEVATION}\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n }\n ,{\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}_${ELEVATION}_contour\"],\n \"title\":\"${LAYERNAME} ${ELEVATION} contour\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n }\n </#list><#-- list ELEVATION -->\n <#else><#-- ELSE NO ELEVATION_DOMAIN -->\n ,{\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}\"],\n \"title\":\"${LAYERNAME}\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n }\n ,{\n \"format\":\"image/png\",\n \"group\":\"${WORKSPACE} ${LAYERNAME}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":0.7,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${LAYERNAME}_contour\"],\n 
\"title\":\"${LAYERNAME} contour\",\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n }\n </#if><#-- if ELEVATION_DOMAIN -->\n </#if><#-- if MATCHES -->\n" }, { "alpha_fraction": 0.5764739513397217, "alphanum_fraction": 0.5984050035476685, "avg_line_length": 38.897727966308594, "blob_id": "f4ebaecd3932a8cf4556f37db28e9bbbecffd000", "content_id": "0e648e72c47e53d29cf4c074595696380a0d1982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 3511, "license_type": "no_license", "max_line_length": 133, "num_lines": 88, "path": "/geobatch/netcdf2geotiff/pom.xml", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- ======================================================================= \n\tMaven Project Configuration File GeoSolutions GeoBatch Project http://geobatch.codehaus.org \n\tVersion: $Id: pom.xml 382 2010-01-07 18:00:13Z dany111 $ ======================================================================= -->\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n\txsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd\">\n\t<modelVersion>4.0.0</modelVersion>\n\t<parent>\n\t <groupId>it.geosolutions.geobatch.lamma</groupId>\n\t <artifactId>gb-lamma</artifactId>\n\t <version>1.3-RC2</version>\n\t</parent>\n\t<!-- =========================================================== -->\n\t<!-- Module Description -->\n\t<!-- =========================================================== -->\n\t<groupId>it.geosolutions.geobatch.metocs</groupId>\n\t<artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n <version>1.3-RC2</version>\n\t\n\t<packaging>jar</packaging>\n\t\n\t<name>GeoBatch action: METOCS 
netcdf2geotiff</name>\n\t\n\t<url>http://mvn.geo-solutions.it/maven_reports/ie/flowmanagers/</url>\n\t<scm>\n\t\t<connection>scm:svn:http://svn.geotools.org/geotools/trunk/gt/modules/flowmanagers/</connection>\n\t\t<url>http://svn.geotools.org/geotools/trunk/gt/modules/flowmanagers/</url>\n\t</scm>\n\t<description>\n\t\tGeoSolutions GeoBatch flow managers - METOCS Actions netcdf2geotiff.\n\t</description>\n\t<licenses>\n\t\t<license>\n\t\t\t<name>Lesser General Public License (LGPL)</name>\n\t\t\t<url>http://www.gnu.org/copyleft/lesser.txt</url>\n\t\t\t<distribution>repo</distribution>\n\t\t</license>\n\t</licenses>\n\t<!-- =========================================================== -->\n\t<!-- Dependency Management -->\n\t<!-- =========================================================== -->\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch</groupId>\n\t\t\t<artifactId>gb-action-imagemosaic</artifactId>\n\t\t</dependency>\n<!--\t\t<dependency>\n\t\t\t<groupId>it.geosolutions.geobatch.metocs</groupId>\n\t\t\t<artifactId>gb-action-metocs-utils</artifactId>\n\t\t</dependency>-->\n\t\t<!-- dependency> <groupId>essi-unidata</groupId> <artifactId>netcdf-java</artifactId> \n\t\t\t<version>4.0.41</version> </dependency> <dependency> <groupId>essi-unidata</groupId> \n\t\t\t<artifactId>grib</artifactId> <version>6.0.18</version> </dependency -->\n\t\t<!-- dependency> <groupId>essi-unidata</groupId> <artifactId>grib</artifactId> \n\t\t\t<version>8.0.29</version> </dependency -->\n\t\t<!-- from http://maven.geotoolkit.org/edu/ucar/netcdf/4.2.32/ 
-->\n\t\t<dependency>\n\t\t\t<groupId>edu.ucar</groupId>\n\t\t\t<artifactId>grib</artifactId>\n\t\t\t<version>4.3.19</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>edu.ucar</groupId>\n\t\t\t<artifactId>netcdf</artifactId>\n\t\t\t<version>4.3.19</version>\n\t\t</dependency>\n<!--\t\t<dependency>\n\t\t\t<groupId>edu.ucar</groupId>\n\t\t\t<artifactId>unidataCommon</artifactId>\n\t\t\t<version>4.3.19</version>\n\t\t</dependency> -->\n\t\t<dependency>\n\t\t\t<groupId>com.thoughtworks.xstream</groupId>\n\t\t\t<artifactId>xstream</artifactId>\n\t\t</dependency>\n\n <dependency>\n <groupId>it.geosolutions.imageio-ext</groupId>\n <artifactId>netcdf-converters</artifactId>\n <exclusions>\n <exclusion>\n <groupId>essi-unidata</groupId>\n <artifactId>netcdf-java</artifactId>\n </exclusion>\n </exclusions>\n </dependency>\n\t</dependencies>\n</project>\n" }, { "alpha_fraction": 0.7553191781044006, "alphanum_fraction": 0.7695035338401794, "avg_line_length": 38, "blob_id": "c8101bc69b07b8515c71c3725364f866f25c1b24", "content_id": "10cfd3376bfc8a3de9303b22c39f2a3cf0f47827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 75, "num_lines": 7, "path": "/py_examples/produceIR.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceRGBOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"IR.tif\"\r\n\r\noutputImage = produceRGBOutputImage(inputDir, baseFileName, \"4\", \"9\", \"10\")\r\noutputImage.save(out_filePath)\r\n\r\n" }, { "alpha_fraction": 0.3317479193210602, "alphanum_fraction": 0.4042806327342987, "avg_line_length": 51.82125473022461, "blob_id": "410f0e502c2dcfca6c2a81bcf8634648a150f82c", "content_id": "4f6eaab222c84d08bb0e260f9a11790bd8e2ea58", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 10933, "license_type": "no_license", "max_line_length": 172, "num_lines": 207, "path": "/GEOBATCH_CONFIG_DIR/commons/copia_tif.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Riccardo Mari\n\nimport os\nimport sys, time\nimport calc\nimport glob\nimport shutil\nimport zipfile\nimport operator\n\ntry: \n from osgeo import gdal\n import numpy\n os.chdir('.')\nexcept ImportError:\n #import gdal\n os.chdir('.')\n\ndef copy_zip(argv):\n\n if (len(sys.argv) == 0):\n print 'Usage:'\n print 'python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ... Channel_n.tif'\n\n for image in sys.argv[1:]:\n \n dirname = '/opt/data/models/'+argv[1]+'/' \n dirArray = []\n for f in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, f)):\n dirArray.append(f)\n\n lastDir = sorted(dirArray,reverse=True) \n \n src_dir = dirname+lastDir[0]\n \n for c in os.listdir(src_dir):\n if os.path.isdir(os.path.join(src_dir, c)):\n \n input_dir = src_dir+'/'+c\n \n output_dir = '/opt/geobatch/temp/zip_temp'\n \n remote_dir = '/var/www/html/download/' \n \n newpath = output_dir+'/'+argv[1]+'/'+c\n \n newpath_remote_dir = remote_dir+'/'+argv[1]+'/'+c\n \n if not os.path.exists(newpath): os.makedirs(newpath)\n \n os.system('ssh [email protected] mkdir -p '+newpath_remote_dir)\n #os.system('ssh [email protected] chmod 733 '+newpath_remote_dir)\n \n for a in os.listdir(input_dir):\n \n if \"0200.000\" in a:\n path200 = newpath+'_200_0'\n if not os.path.exists(path200): os.makedirs(path200)\n shutil.copy2(input_dir+'/'+a, path200)\n elif \"0300.000\" in a:\n path300 = newpath+'_300_0'\n if not os.path.exists(path300): os.makedirs(path300)\n shutil.copy2(input_dir+'/'+a, path300)\n elif \"0500.000\" in a:\n path500 = newpath+'_500_0'\n if not os.path.exists(path500): os.makedirs(path500)\n shutil.copy2(input_dir+'/'+a, path500)\n elif \"0600.000\" in a:\n path600 = 
newpath+'_600_0'\n if not os.path.exists(path600): os.makedirs(path600)\n shutil.copy2(input_dir+'/'+a, path600)\n elif \"0700.000\" in a:\n path700 = newpath+'_700_0'\n if not os.path.exists(path700): os.makedirs(path700)\n shutil.copy2(input_dir+'/'+a, path700)\n elif \"0850.000\" in a:\n path850 = newpath+'_850_0'\n if not os.path.exists(path850): os.makedirs(path850)\n shutil.copy2(input_dir+'/'+a, path850)\n elif \"0925.000\" in a:\n path925 = newpath+'_925_0'\n if not os.path.exists(path925): os.makedirs(path925)\n shutil.copy2(input_dir+'/'+a, path925)\n elif \"1000.000\" in a:\n path1000 = newpath+'_1000_0'\n if not os.path.exists(path1000): os.makedirs(path1000)\n shutil.copy2(input_dir+'/'+a, path1000)\n elif \"0000.000\" in a:\n path0000 = newpath+'_0_0'\n if not os.path.exists(path0000): os.makedirs(path0000)\n shutil.copy2(input_dir+'/'+a, path0000)\n elif \"0002.000\" in a:\n path0002 = newpath+'_2_0'\n if not os.path.exists(path0002): os.makedirs(path0002)\n shutil.copy2(input_dir+'/'+a, path0002)\n elif \"0010.000\" in a:\n path0010 = newpath+'_10_0'\n if not os.path.exists(path0010): os.makedirs(path0010)\n shutil.copy2(input_dir+'/'+a, path0010)\n elif \"0005.000\" in a:\n path0005 = newpath+'_5_0'\n if not os.path.exists(path0005): os.makedirs(path0005)\n shutil.copy2(input_dir+'/'+a, path0005) \n elif \"0025.000\" in a:\n path0025 = newpath+'_25_0'\n if not os.path.exists(path0025): os.makedirs(path0025)\n shutil.copy2(input_dir+'/'+a, path0025) \n elif \"0070.000\" in a:\n path0070 = newpath+'_70_0'\n if not os.path.exists(path0070): os.makedirs(path0070)\n shutil.copy2(input_dir+'/'+a, path0070) \n elif \"0150.000\" in a:\n path0150 = newpath+'_150_0'\n if not os.path.exists(path0150): os.makedirs(path0150)\n shutil.copy2(input_dir+'/'+a, path0150) \n else:\n pass\n \n temp_dir = '/opt/geobatch/temp/zip_temp'+'/'+argv[1] \n \n rem_dir = '/var/www/html/download/' \n \n for k in os.listdir(temp_dir):\n if \"_200_0\" in k:\n v200 = 
k.replace(\"_200_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v200) )\n elif \"_300_0\" in k:\n v300 = k.replace(\"_300_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v300) ) \n elif \"_500_0\" in k:\n v500 = k.replace(\"_500_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v500) ) \n elif \"_600_0\" in k:\n v600 = k.replace(\"_600_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v600) ) \n elif \"_700_0\" in k:\n v700 = k.replace(\"_700_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v700) ) \n elif \"_850_0\" in k:\n v850 = k.replace(\"_850_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v850) ) \n elif \"_925_0\" in k:\n v925 = k.replace(\"_925_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v925) ) \n elif \"_1000_0\" in k: \n v1000 = k.replace(\"_1000_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v1000) ) \n elif \"_0_0\" in k:\n v0000 = k.replace(\"_0_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % 
(temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0000) ) \n elif \"_2_0\" in k:\n v0002 = k.replace(\"_2_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0002) ) \n elif \"_10_0\" in k:\n v0010 = k.replace(\"_10_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0010) ) \n elif \"_5_0\" in k:\n v0005 = k.replace(\"_5_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0005) )\n elif \"_25_0\" in k:\n v0025 = k.replace(\"_25_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0025) ) \n elif \"_70_0\" in k:\n v0070 = k.replace(\"_70_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0070) ) \n elif \"_150_0\" in k:\n v0150 = k.replace(\"_150_0\",\"\")\n os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+v0150) ) \n else:\n pass\n #os.system('zip -r -j '+temp_dir+'/'+k+' '+temp_dir+'/'+k) \n #os.system('scp \"%s\" \"%s:%s\"' % (temp_dir+'/'+k+'.zip', '[email protected]', rem_dir+argv[1]+'/'+k) ) \n \n remove_dir = '/opt/geobatch/temp/zip_temp'+'/'+argv[1]\n os.system('rm -r -f '+remove_dir+'/*')\n\nret=copy_zip(sys.argv)\n\n#def findNewestDir(directory):\n# os.chdir(directory)\n# dirs = {}\n# for dir in glob.glob('*'):\n# if os.path.isdir(dir):\n# dirs[dir] = os.path.getctime(dir)\n#\n# lister = sorted(dirs.iteritems(), 
key=operator.itemgetter(1))\n# return lister[-1][0]\n#\n#print \"The newest directory is\", findNewestDir('/opt/data/models/gfs_50km_run00')" }, { "alpha_fraction": 0.40734824538230896, "alphanum_fraction": 0.5021299123764038, "avg_line_length": 28.359375, "blob_id": "aa0bcdb94170116866fa10e463af8001ef6aae61", "content_id": "85151802d2f82651c7fa4fcb1b475fa5d6191311", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1878, "license_type": "no_license", "max_line_length": 67, "num_lines": 64, "path": "/geobatch/lamma/src/test/resources/python/calc.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Carlo Cancellieri\n\ndef calculateRGB(case, values):\n if (case == 'airmass'):\n return airMass(values)\n elif (case == 'Hrv_Fog'):\n return hrvFog(values)\n elif (case == 'Dust'):\n return dust(values)\n elif (case == 'NatColours'):\n return natColours(values)\n else:\n return None\n \ndef airMass(values):\n data1 = values[0]\n data2 = values[1]\n data3 = values[2]\n data4 = values[3]\n red = data1 - data2\n green = data3 - data4\n blue = data1\n byte_red = 255 * ((red - (-25)) / (0 - (-25))) ** 1 / 1\n byte_green = 255 * ((green - (-40)) / (5 - (-40))) ** 1 / 1\n byte_blue = 255 * ((blue - (243)) / (208 - (243))) ** 1 / 1 \n return [byte_red, byte_green, byte_blue]\n\ndef dust(values):\n data1 = values[0]\n data2 = values[1]\n data3 = values[2]\n red = data3 - data2\n green = data2 - data1\n blue = data2\n byte_red = 255 * ((red - (-4)) / (2 - (-4)))**1/1\n byte_green = 255 * ((green - (0)) / (15 - (-0)))**1/2.5\n byte_blue = 255 * ((blue - (261)) / (289 - (261)))**1/1\n return [byte_red,byte_green,byte_blue]\n\ndef hrvFog(values):\n data1 = values[0]\n data2 = values[1]\n data3 = values[2]\n red = data3\n green = data1\n blue = data2\n byte_red = 255 * ((red - (0.0)) / (0.7 - (0.0)))**1/1.7\n byte_green = 255 * ((green - (0.0)) / (1.0 - 
(0.0)))**1/1.7\n byte_blue = 255 * ((blue - (0.0)) / (1.0 - (0.0)))**1/1.7\n return [byte_red, byte_green, byte_blue]\n\ndef natColours(values):\n data1 = values[0]\n data2 = values[1]\n data3 = values[2]\n red = data3\n green = data2\n blue = data1\n byte_red = 255 * ((red - (0.0)) / (1.0 - (0.0)))**1/1\n byte_green = 255 * ((green - (0.0)) / (1.0 - (0.0)))**1/1\n byte_blue = 255 * ((blue - (0.0)) / (1.0 - (0.0)))**1/1\n return [byte_red, byte_green, byte_blue]" }, { "alpha_fraction": 0.4825476109981537, "alphanum_fraction": 0.485297828912735, "avg_line_length": 34.07476043701172, "blob_id": "f214f21a35a4b0de175b216a1b9ffbcb4fae2028", "content_id": "7e56dee21d8e55ec3632ed02609838994204d082", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 43633, "license_type": "no_license", "max_line_length": 201, "num_lines": 1244, "path": "/geobatch/webapp/pom.xml", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- ======================================================================= \n\tMaven Project Configuration File GeoSolutions GeoBatch Project http://geobatch.codehaus.org \n\tVersion: 0.1 pom.xml 63 2011-05-02 18:55:57Z ccancellieri $ ======================================================================= -->\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n <parent>\n\t <groupId>it.geosolutions.geobatch.lamma</groupId>\n\t <artifactId>gb-lamma</artifactId>\n\t <version>1.3-RC2</version>\n </parent>\n \n <!-- =========================================================== -->\n <!-- Module Description -->\n <!-- =========================================================== -->\n <groupId>it.geosolutions.geobatch.lamma</groupId>\n 
<artifactId>gb-application-lamma</artifactId>\n <packaging>war</packaging>\n \n <name>Lamma GeoBatch webapp</name>\n <description>GeoSolutions GeoBatch Application</description>\n \n <url>http://www.geo-solutions.it/maven_reports/gb/library/</url>\n <organization>\n <name>GeoSolutions</name>\n </organization>\n <scm>\n <connection>scm:svn:http://svn.geotools.org/geotools/trunk/gt/modules/web/</connection>\n <url>http://svn.geotools.org/geotools/trunk/gt/modules/web/</url>\n </scm>\n <licenses>\n <license>\n <name>Lesser General Public License (LGPL)</name>\n <url>http://www.gnu.org/copyleft/lesser.txt</url>\n <distribution>repo</distribution>\n </license>\n </licenses>\n <properties>\n <netbeans.hint.deploy.server>Tomcat60</netbeans.hint.deploy.server>\n <flow.dir>geobatch/WEB-INF/</flow.dir>\n </properties>\n <!-- =========================================================== -->\n <!-- Dependencies -->\n <!-- =========================================================== -->\n <dependencies>\n \n \n <!-- ============================== -->\n <!-- LAMMA specific deps -->\n <!-- ============================== -->\n\n <dependency>\n <groupId>it.geosolutions.geobatch.lamma</groupId>\n <artifactId>gb-lamma-utils</artifactId>\n <version>1.3-RC2</version>\n </dependency>\n \n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n <version>1.3-RC2</version>\n </dependency>\n\n <!-- ============================== -->\n <!-- GeoBatch actions -->\n <!-- ============================== -->\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-commons</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-freemarker</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-ftp</artifactId>\n </dependency>\n\n <dependency>\n 
<groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geonetwork</artifactId>\n </dependency>\n\n <!-- quick update for LAMMA - we'll use 1.1.1 in GB1.3 final -->\n <dependency>\n <groupId>it.geosolutions</groupId>\n <artifactId>geonetwork-manager</artifactId>\n <version>1.1.1-SNAPSHOT</version>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geoserver</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geostore</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geotiff</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-imagemosaic</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-octave-fileinfileout</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-scripting</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-shapefile</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-shp2pg</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-splitting</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-taskexecutor</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-tools</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-xstream</artifactId>\n </dependency>\n\n <!-- ============================== -->\n <!-- GeoBatch services -->\n <!-- ============================== -->\n\n <dependency>\n 
<groupId>it.geosolutions.geobatch.services</groupId>\n <artifactId>gb-jmx</artifactId>\n </dependency>\n\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-octave</artifactId>\n </dependency>\n\n <!-- ============================== -->\n <!-- logging -->\n <!-- ============================== -->\n <dependency>\n <groupId>log4j</groupId>\n <artifactId>log4j</artifactId>\n <scope>runtime</scope>\n </dependency>\n <!-- ============================== -->\n <!-- Platform -->\n <!-- ============================== -->\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-gui</artifactId>\n </dependency>\n <!-- ============================== -->\n <!-- DAO XStream -->\n <!-- ============================== -->\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-dao-xstream</artifactId>\n </dependency>\n <!-- ============================== -->\n <!-- Test: JETTY -->\n <!-- ============================== -->\n <dependency>\n <groupId>org.mortbay.jetty</groupId>\n <artifactId>jetty</artifactId>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.mortbay.jetty</groupId>\n <artifactId>jsp-2.0</artifactId>\n <type>pom</type>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.javassist</groupId>\n <artifactId>javassist</artifactId>\n <scope>test</scope>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <inherited>true</inherited>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-war-plugin</artifactId>\n <configuration>\n <warName>geobatch</warName>\n <webappDirectory>${project.build.directory}/geobatch</webappDirectory>\n </configuration>\n <executions>\n <execution>\n <phase>install</phase>\n <goals>\n <goal>war</goal>\n </goals>\n </execution>\n </executions>\n </plugin>\n <plugin>\n <groupId>org.mortbay.jetty</groupId>\n <artifactId>maven-jetty-plugin</artifactId>\n <version>6.1.8</version>\n <configuration>\n 
<contextPath>geobatch</contextPath>\n <connectors>\n <connector implementation=\"org.mortbay.jetty.nio.SelectChannelConnector\">\n <port>8081</port>\n <maxIdleTime>10000</maxIdleTime>\n </connector>\n </connectors>\n <contextPath>geobatch</contextPath>\n <webAppSourceDirectory>${project.build.directory}/geobatch</webAppSourceDirectory>\n </configuration>\n </plugin>\n </plugins>\n </build>\n <profiles>\n <!-- GB-SERVICES==================================================== -->\n <!-- JMS =========================================================== -->\n <profile>\n <id>jms</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-jms</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_jms</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-jms</artifactId>\n <classifier>flowsettings</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}/data/</outputDirectory>\n <includes>settings/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- JMX ======================================================== -->\n <profile>\n <id>jmx</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch.services</groupId>\n <artifactId>gb-jmx</artifactId>\n 
</dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_jms</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch.services</groupId>\n <artifactId>gb-jmx</artifactId>\n <classifier>flowsettings</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}/data/</outputDirectory>\n <includes>settings/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- Octave ======================================================== -->\n <profile>\n <id>octave</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-octave</artifactId>\n </dependency>\n </dependencies>\n </profile>\n <!-- ======================================================================== \n\t\t\tACTIONS ======================================================================== \n\t\t\tWe need sample flows to be packed into the test jars of every action (was: \n\t\t\tflowmanager). maven-dependency-plugin will extract the sample flows and put \n\t\t\tthem in the WEB-INF/data directory. If you need to specify all the actions, \n\t\t\tplease define the all_actions env var. 
Before using maven-dependency-plugin, \n\t\t\tthe overlay mechanism (http://maven.apache.org/plugins/maven-war-plugin/overlays.html) \n\t\t\twas checked to accomplish this very task, but when more actions were specified, \n\t\t\tonly one was choosen to extract the sample data from. -->\n<!-- <profile>\n <id>remsens</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-remsens</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <executions>\n <execution>\n <id>unpack_remsens</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-remsens</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n-->\n<!-- <profile>\n <id>registry</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-registry</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <executions>\n <execution>\n <id>unpack_registry</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n 
<configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-registry</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n-->\n <profile>\n <id>netcdf2geotiff</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <executions>\n <execution>\n <id>unpack_netcdf2geotiff</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n\n <profile>\n <id>shp2pg</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n 
<artifactId>gb-action-shp2pg</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_shp2pg</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-shp2pg</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- Commons ================================================== -->\n <profile>\n <id>commons</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-commons</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_shp2pg</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-commons</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n 
<overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- GeoNetwork ================================================== -->\n <profile>\n <id>geonetwork</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geonetwork</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_geonetwork</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geonetwork</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- GeoStore ================================================== -->\n <profile>\n <id>geostore</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geostore</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_geostore</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n 
</goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geostore</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- Xstream ================================================== -->\n <profile>\n <id>xstream</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-xstream</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_xstream</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-xstream</artifactId>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- FreeMarker ================================================== -->\n <profile>\n <id>freemarker</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n 
<dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-freemarker</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_freemarker</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-freemarker</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- GeoTIFF ======================================================== -->\n <profile>\n <id>geotiff</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geotiff</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_geotiff</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-geotiff</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n 
<outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- ShapeFile ====================================================== -->\n <profile>\n <id>shapefile</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-shapefile</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_shape</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-shapefile</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- Task Executor ================================================== -->\n <profile>\n <id>task-executor</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-taskexecutor</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n 
<groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_taskexec</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-taskexecutor</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- action-octave-fileinfileout ================================================== -->\n <profile>\n <id>freemarker-filein-fileout</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-freemarker</artifactId>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-octave-fileinfileout</artifactId>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-octave</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_octave_fileinfileout</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-octave-fileinfileout</artifactId>\n <version>${project.version}</version>\n 
<classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- FTP ============================================================ -->\n <profile>\n <id>ftp</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-ftp</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_ftp</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-ftp</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- SCRIPTING ====================================================== -->\n <profile>\n <id>scripting</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-scripting</artifactId>\n 
</dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_scripting</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-scripting</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- SPLITTING ====================================================== -->\n <profile>\n <id>splitting</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-splitting</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_splitting</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-splitting</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n 
<overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n <!-- ImageMosaic ==================================================== -->\n <profile>\n <id>imagemosaic</id>\n <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>\n <dependencies>\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-imagemosaic</artifactId>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-dependency-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>unpack_imagemosaic</id>\n <phase>package</phase>\n <goals>\n <goal>unpack</goal>\n </goals>\n <configuration>\n <artifactItems>\n <artifactItem>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb-action-imagemosaic</artifactId>\n <version>${project.version}</version>\n <classifier>flowdata</classifier>\n <type>jar</type>\n <overWrite>false</overWrite>\n <outputDirectory>${project.build.directory}/${flow.dir}</outputDirectory>\n <includes>data/**</includes>\n </artifactItem>\n </artifactItems>\n <overWriteReleases>true</overWriteReleases>\n <overWriteSnapshots>true</overWriteSnapshots>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n </profile>\n \n <profile>\n <id>lamma</id>\n<!-- <activation>\n <property>\n <name>all</name>\n <value>true</value>\n </property>\n </activation>-->\n <dependencies>\n <dependency>\n\t <groupId>it.geosolutions.geobatch.lamma</groupId>\n\t <artifactId>gb-actions-lamma</artifactId> \n\t <version>1.1-SNAPSHOT</version>\n </dependency>\n\t <dependency>\n <groupId>it.geosolutions</groupId>\n <artifactId>geonetwork-manager</artifactId>\n <version>1.1-SNAPSHOT</version>\n </dependency>\n\t\n </dependencies>\n </profile>\n\n </profiles>\n</project>\n" }, { 
"alpha_fraction": 0.4657994210720062, "alphanum_fraction": 0.4688417911529541, "avg_line_length": 39.21940994262695, "blob_id": "0ebc7decc644988aacc363060404f4b805cc2912", "content_id": "6e40a6af061910f06aa2eb251222ef034c131f71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 19064, "license_type": "no_license", "max_line_length": 201, "num_lines": 474, "path": "/geobatch/pom.xml", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <!-- =========================================================== -->\n <!-- Project Description -->\n <!-- =========================================================== -->\n <parent>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb</artifactId>\n <version>1.3-RC2</version>\n </parent>\n\n <groupId>it.geosolutions.geobatch.lamma</groupId>\n <artifactId>gb-lamma</artifactId>\n <version>1.3-RC2</version>\n \n <packaging>pom</packaging>\n \n <name>LaMMa: GeoSolutions GeoBatch</name>\n <description>The GeoBatch project for LaMMa</description>\n <url>http://www.geo-solutions.it/maven_reports/gb/</url>\n\n <inceptionYear>2011</inceptionYear>\n\n <organization>\n <name>GeoSolutions</name>\n <url>http://www.geo-solutions.it</url>\n </organization>\n \n <scm>\n <connection>\n scm:svn:https://svn.codehaus.org/geobatch/trunk/\n </connection>\n <url>https://svn.codehaus.org/geobatch/trunk/</url>\n </scm>\n \n <licenses>\n <license>\n <name>Lesser General Public License (LGPL)</name>\n <url>http://www.gnu.org/copyleft/lesser.txt</url>\n <distribution>repo</distribution>\n </license>\n </licenses>\n \n <!-- =========================================================== 
-->\n <!-- Developers and Contributors -->\n <!-- =========================================================== -->\n <developers>\n <developer>\n <name>Emanuele Tajariol</name>\n <id>etj</id>\n <email>[email protected]</email>\n <organization>GeoSolutions</organization>\n </developer>\n <developer>\n <name>Carlo Cancellieri</name>\n <id>ccancellieri</id>\n <email>[email protected]</email>\n <organization>GeoSolutions</organization>\n </developer> \n <developer>\n <name>Alessio Fabiani</name>\n <id>alessio</id>\n <email>[email protected]</email>\n <organization>GeoSolutions</organization>\n </developer>\n <developer>\n <name>Simone Giannecchini</name>\n <id>simboss</id>\n <email>[email protected]</email>\n <organization>GeoSolutions</organization>\n </developer>\n </developers>\n\n <!-- =========================================================== -->\n <!-- Properties and Profiles -->\n <!-- modify the build process in certain conditions. -->\n <!-- =========================================================== -->\n <!-- Available properties are: -->\n <!-- all build and pack all the available \t\t -->\n <!-- actions \t\t\t\t\t\t\t -->\n <!-- -->\n <!-- =========================================================== -->\n <!-- Available profiles are: -->\n <!-- extensive.tests Performs more extensive tests than -->\n <!-- the default. Only a few modules -->\n <!-- check for this profile. -->\n <!-- -->\n <!-- interactive.tests Performs more extensive tests than -->\n <!-- the default. Only a few modules -->\n <!-- check for this profile. -->\n <!-- -->\n <!-- site.build The main purpose of this build is to -->\n <!-- create reports. Consequently, a -->\n <!-- JUnit test failure will not stop the -->\n <!-- build. The failure should be -->\n <!-- reported by the surefire report -->\n <!-- plugin. 
-->\n <!-- -->\n <!-- online Profile to active tests which end in -->\n <!-- \"OnlineTest.java\" -->\n <!-- -->\n <!-- stress Profile to active tests which end in -->\n <!-- \"StressTest.java\" -->\n <!-- -->\n <!-- pending Includes modules that are pending -->\n <!-- -->\n <!-- dao.xstream Use XStream in DAO instead of -->\n <!-- default JiBX -->\n <!-- -->\n <!-- Examples: -->\n <!-- mvn -P extensive.tests install -->\n <!-- mvn -P online,stress install -->\n <!-- mvn -P site.build site -->\n <!-- mvn eclipse:eclipse -Djdbc.oracle=true -P pending -->\n <!-- -->\n <!-- While you can specify properties one at a time on the -->\n <!-- command line, for properties describing your -->\n <!-- environment you will want to modify settings.xml -->\n <!-- -->\n <!-- Note that profiles like \"site.build\" are not mandatory -->\n <!-- for using the \"site\" goal. Such profiles just modify -->\n <!-- the build process prior the \"site\" goal execution in a -->\n <!-- manner relevant to the purpose of the \"site\" goal. -->\n <!-- =========================================================== -->\n <properties>\n <allow.test.skip>true</allow.test.skip>\n <allow.test.failure.ignore>false</allow.test.failure.ignore>\n <extensive.tests>false</extensive.tests>\n <interactive.tests>false</interactive.tests>\n <online.skip.pattern>**/*OnlineTest.java</online.skip.pattern>\n <stress.skip.pattern>**/*StressTest.java</stress.skip.pattern>\n <test.maxHeapSize>512M</test.maxHeapSize>\n <src.output>${basedir}/target</src.output>\n </properties>\n\n <!-- =========================================================== -->\n <!-- Dependency Management -->\n <!-- If a POM declares one of those dependencies, then it -->\n <!-- will use the version specified here. Otherwise, those -->\n <!-- dependencies are ignored. 
-->\n <!-- =========================================================== -->\n\n <dependencyManagement>\n <dependencies>\n <!-- inherit DependencyManagement -->\n <dependency>\n <groupId>it.geosolutions.geobatch</groupId>\n <artifactId>gb</artifactId>\n <version>${project.version}</version>\n <type>pom</type>\n <scope>import</scope>\n </dependency>\n <!-- Metocs -->\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-netcdf2geotiff</artifactId>\n <version>${project.version}</version>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-registry</artifactId>\n <version>${project.version}</version>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-remsens</artifactId>\n <version>${project.version}</version>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.imageio-ext</groupId>\n <artifactId>netcdf-converters</artifactId>\n <version>${imageio-ext.version}</version>\n </dependency>\n <dependency>\n <groupId>it.geosolutions.geobatch.metocs</groupId>\n <artifactId>gb-action-metocs-utils</artifactId>\n <version>${project.version}</version>\n </dependency>\n </dependencies>\n </dependencyManagement>\n\n <!-- =========================================================== -->\n <!-- Dependencies to be inherited by all modules. 
-->\n <!-- =========================================================== -->\n\n <dependencies>\n\n </dependencies>\n <build>\n <!-- TODO: the resources stuff hardcodes paths to source + test directories,\n we should be able to use some properties here -->\n <resources>\n <resource>\n <directory>${basedir}/src/main/resources</directory>\n <includes>\n <include>**/*.txt</include>\n <include>**/*.sql</include>\n <include>**/*.html</include>\n <include>**/*.xsd</include>\n <include>**/*.xsl</include>\n <include>**/*.xml</include>\n <include>**/*.utf</include>\n <include>**/*.prj</include>\n <include>**/*.py</include>\n <include>**/*.properties</include>\n <include>**/*.query</include>\n <include>**/*.xquery</include>\n <include>**/*.serialized</include>\n <include>META-INF/*.jai</include>\n <include>META-INF/mailcap</include>\n <include>META-INF/services/**</include>\n <include>applicationContext.xml</include>\n <include>**/*.ftl</include>\n <include>**/placeholder</include>\n <!-- used for otherwise empty dirs -->\n </includes>\n <excludes>\n <exclude>**/doc-files/**</exclude>\n </excludes>\n </resource>\n </resources>\n <testResources>\n <testResource>\n <directory>${basedir}/src/test/resources</directory>\n <includes>\n <include>META-INF/services/**</include>\n <include>**/test-data/**</include>\n <include>**/*.html</include>\n <include>**/*.xsd</include>\n <include>**/*.xsl</include>\n <include>**/*.xml</include>\n <include>**/*.csv</include>\n <include>**/*.sld</include>\n <include>**/*.svg</include>\n <include>**/*.gml</include>\n <include>**/*.wkt</include>\n <include>**/*.txt</include>\n <include>**/*.sql</include>\n <include>**/*.png</include>\n <include>**/*.prj</include>\n <include>**/*.py</include>\n <include>**/*.properties</include>\n <include>**/*.serialized</include>\n <include>**/*.ftl</include>\n <include>**/*.tif</include>\n <include>**/*.tiff</include>\n <include>**/*.zip</include>\n <include>**/placeholder</include>\n <!-- used for otherwise empty 
dirs -->\n </includes>\n <excludes>\n <exclude>**/doc-files/**</exclude>\n </excludes>\n </testResource>\n </testResources>\n <plugins>\n <!-- ======================================================= -->\n <!-- Compilation. -->\n <!-- ======================================================= -->\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-compiler-plugin</artifactId>\n <configuration>\n <source>1.6</source>\n <!-- The -source argument for the Java compiler. -->\n <target>1.6</target>\n <!-- The -target argument for the Java compiler. -->\n <debug>true</debug>\n <!-- Whether to include debugging information. -->\n <encoding>UTF-8</encoding>\n <!-- The -encoding argument for the Java compiler. -->\n </configuration>\n </plugin>\n <!-- ======================================================= -->\n <!-- Tests. -->\n <!-- ======================================================= -->\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-surefire-plugin</artifactId>\n <configuration>\n <includes>\n <include>**/*Test.java</include>\n </includes>\n <excludes>\n <exclude>${online.skip.pattern}</exclude>\n <exclude>${stress.skip.pattern}</exclude>\n <exclude>${test.exclude.pattern}</exclude>\n </excludes>\n <argLine>-Xmx${test.maxHeapSize} -Djava.awt.headless=${java.awt.headless}</argLine>\n <!-- Ignores test failure only if we are generating a -->\n <!-- report for publication on the web site. See the -->\n <!-- profiles section at the begining of this pom.xml file. -->\n <testFailureIgnore>\n ${allow.test.failure.ignore}\n </testFailureIgnore>\n <!-- The two following options have the opposite value of what we would\n like. They are that way because they don't seem to work as expected\n with Surefire 2.3. TODO: Try again when Surefire 2.4 will be available. -->\n <!-- Option to print summary of test suites or just print the test cases that has errors. 
-->\n <printSummary>true</printSummary>\n <!-- Redirect the unit test standard output to a file. -->\n <redirectTestOutputToFile>false</redirectTestOutputToFile>\n </configuration>\n </plugin>\n <!-- code coverage -->\n <plugin>\n <groupId>org.codehaus.mojo</groupId>\n <artifactId>cobertura-maven-plugin</artifactId>\n <executions>\n <execution>\n <goals>\n <goal>clean</goal>\n </goals>\n </execution>\n </executions>\n </plugin>\n <!-- eclipse ide integration -->\n <plugin>\n <artifactId>maven-eclipse-plugin</artifactId>\n <version>2.5</version>\n <configuration>\n <additionalProjectnatures>\n <projectnature>org.springframework.ide.eclipse.core.springnature</projectnature>\n </additionalProjectnatures>\n </configuration>\n </plugin>\n <!-- ======================================================= -->\n <!-- JAR packaging. -->\n <!-- ======================================================= -->\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-jar-plugin</artifactId>\n <configuration>\n <archive>\n <manifest>\n <addClasspath>true</addClasspath>\n </manifest>\n </archive>\n </configuration>\n </plugin>\n <!-- ======================================================= -->\n <!-- Source packaging. -->\n <!-- ======================================================= -->\n <plugin>\n <inherited>true</inherited>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-source-plugin</artifactId>\n <configuration>\n <attach>false</attach>\n </configuration>\n <executions>\n <execution>\n <id>attach-sources</id>\n <goals>\n <goal>jar</goal>\n </goals>\n </execution>\n </executions>\n </plugin>\n </plugins>\n <extensions>\n <extension>\n <groupId>org.apache.maven.wagon</groupId>\n <artifactId>wagon-ftp</artifactId>\n <version>1.0-beta-2</version>\n </extension>\n </extensions>\n </build>\n <!-- ================================================================== -->\n <!-- Repositories. This is where Maven looks for dependencies. 
The -->\n <!-- Maven repository is implicit and doesn't need to be specified. -->\n <!-- ================================================================== -->\n <repositories>\n <repository>\n <id>geosolutions</id>\n <name>GeoSolutions Repository</name>\n <url>http://maven.geo-solutions.it</url>\n </repository>\n <repository>\n <id>geotoolkit</id>\n <name>Geotk, GeoAPI and dependencies</name>\n <url>http://maven.geotoolkit.org/</url>\n </repository>\n <repository>\n <id>java.net</id>\n <name>java.net Repository</name>\n <url>http://download.java.net/maven/2/</url>\n </repository>\n <!-- camel -->\n <repository>\n <id>camel.internal.maven.repository</id>\n <name>Camel internal Maven Repo</name>\n <url>http://svn.apache.org/repos/asf/camel/m2-repo</url>\n </repository>\n <repository>\n <id>JBoss</id>\n <url>http://repository.jboss.com/maven2</url>\n </repository>\n <repository>\n <id>Hibernate Spatial repo</id>\n <url>http://www.hibernatespatial.org/repository</url>\n </repository>\n <repository>\n <snapshots>\n <enabled>true</enabled>\n </snapshots>\n <id>opengeo</id>\n <name>OpenGeo Maven Repository</name>\n <url>http://repo.opengeo.org</url>\n </repository>\n <repository>\n <id>maven-restlet</id>\n <name>Public online Restlet repository</name>\n <url>http://maven.restlet.org</url>\n </repository>\n <repository>\n <id>official maven 1</id>\n <name>Public online maven repository</name>\n <url>http://repo1.maven.org/maven2</url>\n </repository>\n <repository>\n <id>official maven 2</id>\n <name>Public online maven repository</name>\n <url>http://repo2.maven.org/maven2</url>\n </repository>\n <repository>\n <id>Unidata</id>\n <name>Unidata UCAR repository</name>\n <url>https://artifacts.unidata.ucar.edu/content/repositories/unidata-releases/</url>\n </repository>\n </repositories>\n \n <!-- =========================================================== -->\n <!-- Plugin repositories. -->\n <!-- This is where Maven looks for plugin dependencies. 
-->\n <!-- =========================================================== -->\n <pluginRepositories>\n <pluginRepository>\n <id>codehaus-snapshot-plugins</id>\n <name>codehaus-shapshot-plugins</name>\n <url>http://snapshots.repository.codehaus.org/</url>\n <snapshots>\n <enabled>true</enabled>\n </snapshots>\n <releases>\n <enabled>false</enabled>\n </releases>\n </pluginRepository>\n </pluginRepositories>\n\n <!-- =========================================================== -->\n <!-- Modules for the build in approximate dependency order -->\n <!-- =========================================================== -->\n <modules>\n <module>netcdf2geotiff</module>\n <module>lamma</module>\n <module>webapp</module>\n </modules>\n \n <distributionManagement>\n <!--..................................-->\n <!-- GeoSolutions Repository -->\n <!--..................................-->\n <repository>\n <uniqueVersion>false</uniqueVersion>\n <id>geosolutions</id>\n <url>ftp://maven.geo-solutions.it</url>\n </repository>\n </distributionManagement>\n\n</project>\n" }, { "alpha_fraction": 0.5268055200576782, "alphanum_fraction": 0.7226406931877136, "avg_line_length": 79.60713958740234, "blob_id": "f561aef8876b7603b0cf98e18ba54f6e852dca42", "content_id": "12f74e5e7e519d14ac29bb3c8f08a35ffcdff43a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 2257, "license_type": "no_license", "max_line_length": 862, "num_lines": 28, "path": "/GEOBATCH_CONFIG_DIR/commons/env.properties", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "FWTOOLS_HOME=/opt/FWTools-linux-x86_64-3.1.0\nPROJ_LIB=/opt/FWTools-linux-x86_64-3.1.0/usr/share/proj\nHOSTNAME=geobatch\nGDAL_DRIVER_PATH=/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/gdalplugins\nSHELL=/bin/sh\nTERM=xterm\nHISTSIZE=1000\nSSH_CLIENT=217.133.17.186 47917 
292\nLC_NUMERIC=C\nSSH_TTY=/dev/pts/1\nUSER=tomcat\nLS_COLORS=no=00:fi=00:di=00;34:ln=00;36:pi=40;33:so=00;35:bd=40;33;01:cd=40;33;01:or=01;05;37;41:mi=01;05;37;41:ex=00;32:*.cmd=00;32:*.exe=00;32:*.com=00;32:*.btm=00;32:*.bat=00;32:*.sh=00;32:*.csh=00;32:*.tar=00;31:*.tgz=00;31:*.arj=00;31:*.taz=00;31:*.lzh=00;31:*.zip=00;31:*.z=00;31:*.Z=00;31:*.gz=00;31:*.bz2=00;31:*.bz=00;31:*.tz=00;31:*.rpm=00;31:*.cpio=00;31:*.jpg=00;35:*.gif=00;35:*.bmp=00;35:*.xbm=00;35:*.xpm=00;35:*.png=00;35:*.tif=00;35:\nLD_LIBRARY_PATH=/opt/FWTools-linux-x86_64-3.1.0/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/mysql:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/ogdi:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas-sse:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas:/opt/FWTools-linux-x86_64-3.1.0/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/mysql:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/ogdi:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas-sse:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas:/opt/FWTools-linux-x86_64-3.1.0/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/mysql:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/ogdi:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas-sse:/opt/FWTools-linux-x86_64-3.1.0/usr/lib64/atlas::/lib64:/lib64:/lib64\nPATH=/opt/FWTools-linux-x86_64-3.1.0/usr/bin:/opt/FWTools-linux-x86_64-3.1.0/usr/bin:/opt/FWTools-linux-x86_64-3.1.0/usr/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin\nMAIL=/var/spool/mail/root\nPWD=/root\nINPUTRC=/etc/inputrc\nLANG=it_IT.UTF-8\nGDAL_DATA=/opt/FWTools-linux-x86_64-3.1.0/usr/share/gdal\nSSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass\nGEOTIFF_CSV=/opt/FWTools-linux-x86_64-3.1.0/usr/share/epsg_csv\nHOME=/home/tomcat\nSHLVL=2\nLOGNAME=tomcat\nSSH_CONNECTION=217.133.17.186 47917 172.16.1.134 292\nLESSOPEN=|/usr/bin/lesspipe.sh 
%s\nG_BROKEN_FILENAMES=1\n_=/bin/env\n" }, { "alpha_fraction": 0.6098807454109192, "alphanum_fraction": 0.617830753326416, "avg_line_length": 23.81690216064453, "blob_id": "aac6da221635704124dd86f302cae2a210be1eea", "content_id": "95842a490055c4ccd5be7d3a826e0f05954135b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1761, "license_type": "no_license", "max_line_length": 99, "num_lines": 71, "path": "/GEOBATCH_CONFIG_DIR/commons/prova.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Daniele Romagnoli\n# Carlo Cancellieri\n# Riccardo Mari\n\n# RGB AirMass\n#-----------------------------\n#from osgeo import osr\n#from osgeo.gdalconst import GA_ReadOnly, GDT_Byte\nimport os\nimport sys, time\nimport calc\nimport glob\nimport shutil\nimport zipfile\n\ntry: \n from osgeo import gdal\n import numpy\n os.chdir('.')\nexcept ImportError:\n #import gdal\n os.chdir('.')\n\n\ndef simpleCreateRGB(argv):\n # register all of the GDAL drivers\n #gdal.AllRegister()\n \n inDs = []\n \n if (len(sys.argv) == 0):\n print 'Usage:'\n print 'python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ... 
Channel_n.tif'\n \n # open channels skipping first 2 args\n # 0 == ./createRGB_X.py\t[script file]\n # 1 == OutFile.tif\t[output file]\n for image in sys.argv[1:]:\n out_file = open(\"/opt/geobatch/conf/commons/pippo.txt\",\"w\")\n out_file.write(\"Argomento: \"+image+\"\\n\")\n out_file.close()\n #shutil.copy2('/opt/data/msg3/MSG3_Airmass/*.tif', '/opt/geobatch/temp/pippo')\n #if openImage is None:\n #print 'Could not open ', image\n #sys.exit(-1)\n #break\n #else :\n #inDs.append(openImage)\n #print 'loaded image ', image\n\n# MAIN\n\nret=simpleCreateRGB(sys.argv)\n\n#src_dir = '/opt/data/msg3/MSG3_Airmass/'\n#dst_dir = '/opt/geobatch/temp/pippo'\n\n#for jpgfile in glob.glob(os.path.join(src_dir, \"*.tif\")):\n# shutil.copy(jpgfile, dst_dir)\n\n\n#physicalPathOfFile = '/opt/geobatch/temp/pippo'\n#logicalPathOfFileInZip = 'pippo'\n\n#restoreZip = zipfile.ZipFile(\"test.zip\", \"w\")\n#restoreZip.write(physicalPathOfFile, logicalPathOfFileInZip)\n#restoreZip.close()\n\n#restoreZip = zipfile.ZipFile(\"test.zip\", \"w\", zipfile.ZIP_DEFLATED)" }, { "alpha_fraction": 0.6340279579162598, "alphanum_fraction": 0.6851270198822021, "avg_line_length": 22.046052932739258, "blob_id": "ad3d034825f8a39a987173d4c42bf56e170622c9", "content_id": "76a82f88c5c96bfdb91e6a00bfd129358a2991f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3503, "license_type": "no_license", "max_line_length": 199, "num_lines": 152, "path": "/py_examples/createRGB_2.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Daniele Romagnoli\n# Carlo Cancellieri\n# Riccardo Mari\n\n# RGB AirMass\nimport os, sys, string, utils\nimport time\nimport datetime\nfrom glob import glob\n\n#----------------------------- \n\ntry:\n from osgeo import ogr, gdal\n from osgeo.gdalconst import *\n from math import sqrt\n import numpy\n os.chdir('.')\nexcept ImportError:\n import ogr, gdal\n from 
gdalconst import *\n from math import sqrt\n import Numeric\n os.chdir('.')\n\n# register all of the GDAL drivers\ngdal.AllRegister()\n\ninDs=[]\n#\t\t0\t\t1\t\t2\t\t\t\t\t\t3\t\t\t\t4\t\t\t\t\t5\n#python ./createRGB_2.py AirMass_Merged.tif MSG2_201109230915_eurafr_WV_062_05.tif MSG2_201109230915_eurafr_WV_073_06.tif MSG2_201109230915_eurafr_IR_097_08.tif MSG2_201109230915_eurafr_IR_108_09.tif\n\nif (len(sys.argv)< 4):\n print 'Usage:'\n print 'python ./createRGB_2.py OutFile.tif Channel_0.tif ... Channel_n.tif'\n\n# open channels skipping first 2 args\n# 0 == ./createRGB_X.py\t[script file]\n# 1 == OutFile.tif\t[output file]\nfor image in sys.argv[2:]:\n print 'loding image ...'\n openImage=gdal.Open(image, GA_ReadOnly) \n if openImage is None:\n print 'Could not open ',image\n sys.exit(-1)\n break\n else :\n inDs.append(openImage)\n print 'loaded image ', image\n\n# get image size\nrows = inDs[0].RasterYSize\ncols = inDs[0].RasterXSize\nbands = inDs[0].RasterCount\n\n# get the bands and block sizes\ninBand=[]\nfor image in range(len(inDs)):\n inBand.append(inDs[image].GetRasterBand(1))\n\n\nblockSizes = utils.GetBlockSize(inBand[0])\nxBlockSize = blockSizes[0]\nyBlockSize = blockSizes[1]\n#print yBlockSize, xBlockSize\n\n# create the output image\ndriver = inDs[0].GetDriver()\noptions = [\"TILED=YES\",\"BLOCKXSIZE=256\",\"BLOCKYSIZE=256\"]\noutMerged = driver.Create(sys.argv[1], cols, rows, 3, GDT_Byte, options)\nif outMerged is None:\n print 'Could not create ',sys.argv[1]\n sys.exit(-1)\nelse:\n print 'Writing to ',sys.argv[1]\n\noutBandR = outMerged.GetRasterBand(1)\noutBandG = outMerged.GetRasterBand(2)\noutBandB = outMerged.GetRasterBand(3)\n\n# loop through the rows\nfor i in range(0, rows, yBlockSize):\n if i + yBlockSize < rows:\n\tnumRows = yBlockSize\n else:\n\tnumRows = rows - i\n\n # loop through the columns\n for j in range(0, cols, xBlockSize):\n\tif j + xBlockSize < cols:\n\t numCols = xBlockSize\n\telse:\n\t numCols = cols - j\n\n\t# read the 
data\n\tfor data in inBand:\n\t values=data.ReadAsArray(j, i, numCols, numRows).astype(numpy.float)\n\t\n\t# do the calculations \n\trgb=calculateRGB(values)\n\t\n\t# write the data\n\toutBandR.WriteArray(rgb[0], j, i)\n\toutBandG.WriteArray(rgb[1], j, i)\n\toutBandB.WriteArray(rgb[2], j, i)\n\n# flush data to disk, set the NoData value and calculate stats\noutBandR.FlushCache()\noutBandG.FlushCache()\noutBandB.FlushCache()\n\nnoData=9999\n\noutBandR.SetNoDataValue(noData)\noutBandG.SetNoDataValue(noData)\noutBandB.SetNoDataValue(noData)\n\n# georeference the image and set the projection\noutMerged.SetGeoTransform(inDs[0].GetGeoTransform())\n\noutMerged.SetProjection(inDs[0].GetProjection())\n\n\n# build pyramids\noutMerged.BuildOverviews('NEAREST', overviewlist = [2,4,8,16,32])\n\ninDs[0] = None\ninDs[1] = None\ninDs[2] = None\n\noutMerged = None\n\nreturn 1\n\n\ndef calculateRGB(*values)\n\tdata1 = values[0]\n\tdata2 = values[1]\n\tdata3 = values[2]\n\tdata4 = values[3]\n\n\tred = data1 - data2\n\tgreen = data3 - data4\n\tblue = data1\n\t\n\tbyte_red = 255 * ((red - (-25)) / (0 - (-25)))**1/1\n\tbyte_green = 255 * ((green - (-40)) / (5 - (-40)))**1/1\n\tbyte_blue = 255 * ((blue - (243)) / (208 - (243)))**1/1\n\t\n\treturn {byte_red,byte_green,byte_blue}\n" }, { "alpha_fraction": 0.5520262718200684, "alphanum_fraction": 0.566703200340271, "avg_line_length": 28.64285659790039, "blob_id": "b6de225aba93f29d37cd41482a353d0db324859e", "content_id": "00a69fc08a6a196f7410dc43a2a919f6f637352e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9130, "license_type": "no_license", "max_line_length": 215, "num_lines": 308, "path": "/GEOBATCH_CONFIG_DIR/commons/createRGB_2.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Daniele Romagnoli\n# Carlo Cancellieri\n# Riccardo Mari\n\n# RGB AirMass\n#-----------------------------\nfrom osgeo import 
osr\nfrom osgeo.gdalconst import GA_ReadOnly, GDT_Byte\nimport os\nimport sys, time\nimport utils \nimport calc\n\ntry: \n from osgeo import gdal\n import numpy\n os.chdir('.')\nexcept ImportError:\n import gdal\n os.chdir('.')\n\ndef intersection (geom1, geom2):\n# geom = CreateGeometryFromWkt('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))')\n #geom2 = CreateGeometryFromWkt('POLYGON((5 0, 5 15, 15 15, 15 5, 5 0))')\n #print geom3\n return geom1.Intersection(geom2)\n\n\n\ndef warpAndCreateRGB(argv):\n # register all of the GDAL drivers\n gdal.AllRegister()\n \n inDs = []\n inPath = []\n if (len(sys.argv) < 5):\n print 'Usage:'\n print 'python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ... Channel_n.tif'\n \n \n # open channels skipping first 2 args\n # 0 == ./createRGB_X.py [script file]\n # 1 == OutFile.tif [output file]\n for image in sys.argv[3:]:\n print 'loding image ...'\n openImage = gdal.Open(image, GA_ReadOnly)\n # printXY(openImage)\n# LatLon(openImage)\n# toLatLon(openImage)\n# getBB(openImage)\n if openImage is None:\n print 'Could not open ', image\n sys.exit(-1)\n break\n else :\n inDs.append(openImage)\n inPath.append(image)\n print 'loaded image ', image\n \n \n # get the bands and no data\n noData=9999\n inBand = []\n rows = []\n cols = []\n area=-1\n hyResImage=-1\n # if apply warp will be set to true warp will be applied\n applyWarp=False\n for b in range(len(inDs)):\n bands = inDs[0].RasterCount\n if (bands > 1):\n print 'WARNING error more than one band recognized'\n band=inDs[b].GetRasterBand(1)\n inBand.append(band)\n # get image size\n rows.append(inDs[b].RasterYSize)\n cols.append(inDs[b].RasterXSize)\n # calculate the bigger area to select\n # the highest image resolution\n areaBand=rows[b]*cols[b]\n if (area<areaBand):\n area=areaBand\n hyResImage=b\n # check if there are different areas \n if (not applyWarp and areaBand<>area):\n applyWarp=True\n \n # calculate the nodata\n #TODO warn latest nodata is used -> warp???\n 
noDataBand=band.GetNoDataValue()\n if (noDataBand<>noData and noDataBand<>None):\n print 'New noData -> ',noDataBand\n noData=noDataBand\n#-----------------------------------------------\n blockSizes = GetBlockSize(inBand[0])\n cBlockSize = blockSizes[0]\n rBlockSize = blockSizes[1]\n #src_geomatrix=inDs[hyResImage].GetGeoTransform()\n #dst_geomatrix=inDs[b].GetGeoTransform()\n srcPath=inPath[hyResImage]\n \n vrt_text=utils.warp(srcPath, cols[hyResImage], rows[hyResImage], inDs[hyResImage].GetGeoTransform(), inDs[b].GetProjection(), inDs[hyResImage].GetProjection(), inDs[b].GetGeoTransform(), cBlockSize, rBlockSize)\n \n print vrt_text\n warped_image=gdal.Open(vrt_text,GA_ReadOnly)\n#-----------------------------------------------\n #print yBlockSize, xBlockSize\n \n # create the output image\n driver = inDs[0].GetDriver()\n options = [\"TILED=YES\", \"BLOCKXSIZE=256\", \"BLOCKYSIZE=256\"]\n outMerged = driver.Create(sys.argv[2], cols, rows, 3, GDT_Byte, options)\n if outMerged is None:\n print 'Could not create ', sys.argv[1]\n sys.exit(-1)\n else:\n print 'Writing to ', sys.argv[1]\n \n outBandR = outMerged.GetRasterBand(1)\n outBandG = outMerged.GetRasterBand(2)\n outBandB = outMerged.GetRasterBand(3)\n\n # loop through the rows\n for r in range(0, rows, rBlockSize):\n # loop through the columns\n for c in range(0, cols, cBlockSize):\n \n values=[]\n # read the data\n for b in len(range(inBand)):\n # read size\n if (r + rBlockSize < rows[b]):\n numRows = rBlockSize\n else:\n numRows = rows[b] - r\n \n if (c + cBlockSize < cols[b]):\n numCols = cBlockSize\n else:\n numCols = cols[b] - c\n# values.append(inBand[b].ReadAsArray(c+cOffset[b], r+rOffset[b], numCols, numRows).astype(numpy.float32))\n \n # do the calculations \n rgb = calculateRGB(values)\n \n # write the data\n outBandR.WriteArray(rgb[0], c, r)\n outBandG.WriteArray(rgb[1], c, r)\n outBandB.WriteArray(rgb[2], c, r)\n\n # flush data to disk, set the NoData value and calculate stats\n 
outBandR.FlushCache()\n outBandG.FlushCache()\n outBandB.FlushCache()\n \n outBandR.SetNoDataValue(noData)\n outBandG.SetNoDataValue(noData)\n outBandB.SetNoDataValue(noData)\n \n # georeference the image and set the projection\n outMerged.SetGeoTransform(inDs[0].GetGeoTransform())\n \n outMerged.SetProjection(inDs[0].GetProjection())\n \n # build pyramids\n outMerged.BuildOverviews('NEAREST', overviewlist=[2, 4, 8, 16, 32])\n \n for IN in inDs:\n# IN.CloseDS()\n IN = None\n \n# outMerged.CloseDS()\n outMerged = None\n\n\n\n#---------------------------------------------------------------------------------\n\ndef simpleCreateRGB(argv):\n # register all of the GDAL drivers\n gdal.AllRegister()\n \n inDs = []\n \n if (len(sys.argv) < 5):\n print 'Usage:'\n print 'python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ... Channel_n.tif'\n \n # open channels skipping first 2 args\n # 0 == ./createRGB_X.py\t[script file]\n # 1 == OutFile.tif\t[output file]\n for image in sys.argv[3:]:\n print 'loding image ...'\n openImage = gdal.Open(image, GA_ReadOnly)\n # printXY(openImage)\n utils.LatLon(openImage)\n utils.toLatLon(openImage)\n utils.getBB(openImage)\n if openImage is None:\n print 'Could not open ', image\n sys.exit(-1)\n break\n else :\n inDs.append(openImage)\n print 'loaded image ', image\n \n # get image size\n rows = inDs[0].RasterYSize\n cols = inDs[0].RasterXSize\n \n # get the bands and no data\n noData=9999\n inBand = []\n for image in range(len(inDs)):\n band=inDs[image].GetRasterBand(1)\n inBand.append(band)\n bandNoData=band.GetNoDataValue()\n if (bandNoData<>noData and bandNoData<>None):\n print 'New noData -> ',bandNoData\n noData=bandNoData\n \n blockSizes = utils.GetBlockSize(inBand[0])\n cBlockSize = blockSizes[0]\n rBlockSize = blockSizes[1]\n \n # create the output image\n driver = inDs[0].GetDriver()\n options = [\"TILED=YES\", \"BLOCKXSIZE=256\", \"BLOCKYSIZE=256\"]\n outMerged = driver.Create(sys.argv[2], cols, rows, 3, GDT_Byte, 
options)\n if outMerged is None:\n print 'Could not create ', sys.argv[2]\n return -1\n else:\n print 'Writing to ', sys.argv[2]\n \n outBandR = outMerged.GetRasterBand(1)\n outBandG = outMerged.GetRasterBand(2)\n outBandB = outMerged.GetRasterBand(3)\n \n # loop through the rows\n for r in range(0, rows, rBlockSize):\n # read size\n if (r + rBlockSize < rows):\n numRows = rBlockSize\n else:\n numRows = rows - r\n \n # loop through the columns\n for c in range(0, cols, cBlockSize):\n \n if (c + cBlockSize < cols):\n numCols = cBlockSize\n else:\n numCols = cols - c\n \n values=[]\n # read the data\n for b in range(len(inBand)):\n \n values.append(inBand[b].ReadAsArray(c, r, numCols, numRows).astype(float))\n \n # do the calculations \n rgb = calc.calculateRGB(sys.argv[1], values) #TODO\n \n values=None;\n \n # write the data\n outBandR.WriteArray(rgb[0], c, r)\n outBandG.WriteArray(rgb[1], c, r)\n outBandB.WriteArray(rgb[2], c, r)\n \n # flush data to disk, set the NoData value and calculate stats\n outBandR.FlushCache()\n outBandG.FlushCache()\n outBandB.FlushCache()\n \n outBandR.SetNoDataValue(noData)\n outBandG.SetNoDataValue(noData)\n outBandB.SetNoDataValue(noData)\n \n # geo-reference the image and set the projection\n outMerged.SetGeoTransform(inDs[0].GetGeoTransform())\n \n outMerged.SetProjection(inDs[0].GetProjection())\n \n # build pyramids\n outMerged.BuildOverviews('NEAREST', overviewlist=[2, 4, 8, 16, 32])\n \n for IN in inDs:\n# IN.CloseDS()\n IN = None\n \n# outMerged.CloseDS()\n outMerged = None\n\n# MAIN\n\nstartTime = time.time()\n\n#warpAndCreateRGB(sys.argv)\nret=simpleCreateRGB(sys.argv)\n\nendTime = time.time()\n\nprint \"The calc took \" + str(endTime-startTime) + \" seconds\"\n" }, { "alpha_fraction": 0.5162561535835266, "alphanum_fraction": 0.540492594242096, "avg_line_length": 24.344999313354492, "blob_id": "93834110ac813b44ad0312b4a4939d0e34d69308", "content_id": "1edcb551cf91ff2da923db0b1bdc7d380583302c", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5075, "license_type": "no_license", "max_line_length": 84, "num_lines": 200, "path": "/GEOBATCH_CONFIG_DIR/commons/gst_msg_layer_template.js", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "<#function getRuntime(root)>\n <#if root.TIME_DOMAIN??>\n <#return root.TIME_DOMAIN[0]>\n </#if>\n</#function>\n{\n \"format\":\"image/gif\",\n \"group\":\"${WORKSPACE}\",<#-- FIXED -->\n \"name\":\"${LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1.0,\n \"selected\":false,\n \"source\":\"${WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${DEFAULT_STYLE}\"],\n \"title\":\"${LAYERNAME}\", <#-- FIXED -->\n \"transparent\":true,\n <#if GN_UUID??>\"uuid\":\"${GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n <#if ELEVATION_DOMAIN??>,\"elevation\":\"${ELEVATION_DOMAIN[0]}\"</#if><#-- FIXED -->\n},{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_stati\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Stati\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true,\n},{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_regioni\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Regioni\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true,\n},{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_provincie\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n 
\"title\":\"Province\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true,\n},{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"comuni\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Comuni Italia\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true,\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"prec60_l\",\n \"opacity\":0.9,\n \"buffer\": 2,\n \"selected\":false,\n \"tiled\":false,\n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"heatmap_pioggia\"],\n \"style\":[\"pioggia\"],\n \"title\":\"Pioggia oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"prec60_l\",\n \"graphAttribute\": \"prec_mm\",\n \"cumulative\": true, \n \"elevation\":\"0.0\"\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"prec360_l\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"heatmap_pioggia\"],\n \"style\":[\"pioggia\"],\n \"title\":\"Pioggia 6 ore\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"prec360_l\",\n \"graphAttribute\": \"prec_mm\",\n \"cumulative\": true, \n \"elevation\":\"0.0\"\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"temparia_l\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"temperatura\"],\n \"style\":[\"temperatura_legend\"],\n \"title\":\"Temperatura oraria\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": 
\"temparia_l\",\n \"graphAttribute\": \"temp_c\",\n \"cumulative\": false, \n \"elevation\":\"0.0\"\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"umid_l\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"umidita\"],\n \"style\":[\"umidita\"],\n \"title\":\"Umidita oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"umid_l\",\n \"graphAttribute\": \"umid_per\", \n \"elevation\":\"0.0\"\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"umid_l\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"temp_rugiada_legend\"],\n \"style\":[\"temp_rugiada_legend\"],\n \"title\":\"Temp rugiada oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"umid_l\",\n \"graphAttribute\": \"trug_c\", \n \"elevation\":\"0.0\"\n},{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"vent_l\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"vento\"],\n \"style\":[\"vento\"],\n \"title\":\"Vento\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"vent_l\",\n \"graphAttribute\": \"vven_ms\",\n \"elevation\":\"0.0\"\n} " }, { "alpha_fraction": 0.7456446290016174, "alphanum_fraction": 0.7700348496437073, "avg_line_length": 38.71428680419922, "blob_id": "5995ec670ef5ef0b537ca234d660824235af5bca", "content_id": "49bbab585564de6712f7ac43ce75a41bedc5c0ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": "no_license", "max_line_length": 79, "num_lines": 7, "path": "/py_examples/produceHRV.py", "repo_name": 
"geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceRGBOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"HRV.tif\"\r\n\r\noutputImage = produceRGBOutputImage(inputDir, baseFileName, \"12\", \"12\", \"9>12\")\r\noutputImage.save(out_filePath)\r\n\r\n" }, { "alpha_fraction": 0.37322673201560974, "alphanum_fraction": 0.42423179745674133, "avg_line_length": 34.751792907714844, "blob_id": "7aa6a644c6f66186ea68974a7c61e471e6e66c08", "content_id": "1a577162d135271ec76fd0d0366adf1e1ccd47e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 39898, "license_type": "no_license", "max_line_length": 234, "num_lines": 1116, "path": "/GEOBATCH_CONFIG_DIR/commons/gst_template.js", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "{\n \"about\":{\n \"abstract\":\"Consorzio LaMMA\",\n \"contact\":\"<a href='http://www.lamma.rete.toscana.it/'>http://www.lamma.rete.toscana.it/<\\/a>.\",\n \"title\":\"Dati Meteo - Consorzio LaMMA\"\n },\n \"defaultSourceType\":\"gxp_wmssource\",\n \"isLoadedFromConfigFile\":true,\n \"appType\":\"private\",\n \"map\":{\n \"layoutConfig\": {\n \"monitorResize\": false\n },\n \"center\":[\n \"1250000.0000000\",\n \"5370000.0000000\"\n ],\n \"layers\":[\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"Aerial\",\n \"selected\":false,\n \"source\":\"bing\",\n \"title\":\"Bing Aerial\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"mapnik\",\n \"selected\":false,\n \"source\":\"osm\",\n \"title\":\"Open Street Map\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"osm\",\n \"selected\":false,\n \"source\":\"mapquest\",\n \"title\":\"MapQuest OpenStreetMap\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n 
\"name\":\"ROADMAP\",\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Roadmap\",\n \"visibility\":false\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"TERRAIN\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Terrain\",\n \"visibility\":true\n },\n {\n \"fixed\":true,\n \"group\":\"background\",\n \"name\":\"HYBRID\",\n \"selected\":false,\n \"source\":\"google\",\n \"title\":\"Google Hybrid\",\n \"visibility\":false\n },{\n \"source\": \"ol\",\n \"group\": \"background\",\n \"fixed\": true,\n \"type\": \"OpenLayers.Layer\",\n \"visibility\": false,\n \"args\": [\n \"None\", {\"visibility\": false}\n ]\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Airmass\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG2_Airmass\",\n \"transparent\":true,\n \"uuid\":\"734a9e42-3df7-4c11-bf30-61155c3ac415\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_NatColours\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG2_NatColours\",\n \"transparent\":true,\n \"uuid\":\"034bc391-2cac-46f4-a10a-702e56298a12\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Dust\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG2_Dust\",\n \"transparent\":true,\n \"uuid\":\"7cf877c4-f8b2-4b3d-a4e3-451522125175\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_01\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n 
\"styles\":[\"msg2_channel_01\"],\n \"title\":\"MSG2_Channel_01\",\n \"transparent\":true,\n \"uuid\":\"598d39d7-fa2d-4b51-850e-6623f392f5b3\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_02\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_02\"],\n \"title\":\"MSG2_Channel_02\",\n \"transparent\":true,\n \"uuid\":\"598d39d7-fa2d-4b51-850e-6623f392f5b3\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_03\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_03\"],\n \"title\":\"MSG2_Channel_03\",\n \"transparent\":true,\n \"uuid\":\"598d39d7-fa2d-4b51-850e-6623f392f5b3\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_04\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_04\"],\n \"title\":\"MSG2_Channel_04\",\n \"transparent\":true,\n \"uuid\":\"598d39d7-fa2d-4b51-850e-6623f392f5b3\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_05\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_05\"],\n \"title\":\"MSG2_Channel_05\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_06\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n 
\"styles\":[\"msg2_channel_06\"],\n \"title\":\"MSG2_Channel_06\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_07\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_07\"],\n \"title\":\"MSG2_Channel_07\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_08\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_08\"],\n \"title\":\"MSG2_Channel_08\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_09\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_09\"],\n \"title\":\"MSG2_Channel_09\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_10\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_10\"],\n \"title\":\"MSG2_Channel_10\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_11\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n 
\"styles\":[\"msg2_channel_11\"],\n \"title\":\"MSG2_Channel_11\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG2\",\n \"name\":\"MSG2_Channel_12\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG2\",\n \"styles\":[\"msg2_channel_12\"],\n \"title\":\"MSG2_Channel_12\",\n \"transparent\":true,\n \"uuid\":\"9e100217-29c5-4f86-9be6-728736ab6fa0\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Airmass\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG3_Airmass\",\n \"transparent\":true,\n \"uuid\":\"734a9e42-3df7-4c11-bf30-61155c3ac415\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_NatColours\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG3_NatColours\",\n \"transparent\":true,\n \"uuid\":\"034bc391-2cac-46f4-a10a-702e56298a12\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Dust\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"raster\"],\n \"title\":\"MSG3_Dust\",\n \"transparent\":true,\n \"uuid\":\"7cf877c4-f8b2-4b3d-a4e3-451522125175\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_01\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_01\"],\n 
\"title\":\"MSG3_Channel_01\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_02\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_02\"],\n \"title\":\"MSG3_Channel_02\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_03\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_03\"],\n \"title\":\"MSG3_Channel_03\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_04\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_04\"],\n \"title\":\"MSG3_Channel_04\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_05\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_05\"],\n \"title\":\"MSG3_Channel_05\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_06\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_06\"],\n 
\"title\":\"MSG3_Channel_06\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_07\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_07\"],\n \"title\":\"MSG3_Channel_07\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_08\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_08\"],\n \"title\":\"MSG3_Channel_08\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_09\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_09\"],\n \"title\":\"MSG3_Channel_09\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_10\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_10\"],\n \"title\":\"MSG3_Channel_10\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_11\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_11\"],\n 
\"title\":\"MSG3_Channel_11\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n },{\n \"format\":\"image/gif\",\n \"group\":\"MSG3\",\n \"name\":\"MSG3_Channel_12\",\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"MSG3\",\n \"styles\":[\"msg3_channel_12\"],\n \"title\":\"MSG3_Channel_12\",\n \"transparent\":true,\n \"uuid\":\"8219b507-b5c0-4bce-bab2-c587edd68376\",\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n }<#list event as root>\n\t <#-- NO MATCHES == NO CONTOUR -->\n\t <#if !( root.LAYERNAME?matches('.*Wind.*') )>\n\t <#if root.ELEVATION_DOMAIN?? >\n\t\t <#list root.ELEVATION_DOMAIN as ELEVATION >\n\n\t,{\n \"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} ${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}_${ELEVATION}\"],\n \"style\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}_${ELEVATION}\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${ELEVATION}\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n\t}\n\t\t </#list><#-- list ELEVATION -->\n\t <#else><#-- ELSE NO ELEVATION_DOMAIN -->\n\t,{\n\t \"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} 
${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}\"],\n \"style\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")}\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n\t}\n\t </#if><#-- if ELEVATION_DOMAIN -->\n\t <#else><#-- else if MATCHES -->\n\t \t <#if root.ELEVATION_DOMAIN?? >\n\t\t <#list root.ELEVATION_DOMAIN as ELEVATION >\n\t,{\n \"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} ${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}_${ELEVATION}\"],\n \"style\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}_${ELEVATION}\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${ELEVATION}\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n\t}\n\t,{\n 
\"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} ${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"wind_arrow_palette\"],\n \"style\":[\"wind_arrow_palette_raster\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${ELEVATION} direction\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\",\n \"elevation\":\"${ELEVATION}\"\n\t}\n\t\t </#list><#-- list ELEVATION -->\n\t <#else><#-- ELSE NO ELEVATION_DOMAIN -->\n\t,{\n\t \"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} ${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}\"],\n \"style\":[\"${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))}\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")}\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n\t}\n\t,{\n \"format\":\"image/png8\",\n \"group\":\"${root.WORKSPACE} 
${root.LAYERNAME?substring(0,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} ${root.LAYERNAME?substring(root.LAYERNAME?last_index_of(\"_\")+1,root.LAYERNAME?last_index_of(\"T\"))}\",<#-- FIXED -->\n \"name\":\"${root.LAYERNAME}\",<#-- FIXED -->\n \"opacity\":1,\n \"selected\":false,\n \"source\":\"${root.WORKSPACE}\", <#-- FIXED -->\n \"styles\":[\"wind_arrow_palette\"],\n \"style\":[\"wind_arrow_palette_raster\"],\n \"title\":\"${root.LAYERNAME?substring(8,root.LAYERNAME?last_index_of(\"_\"))?replace(\"_\",\" \")} direction\",\n \"transparent\":true,\n <#if root.GN_UUID??>\"uuid\":\"${root.GN_UUID}\",</#if><#-- UUID from GeoNetwork -->\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"transitionEffect\":\"resize\"\n\t}\n\t </#if><#-- if ELEVATION_DOMAIN -->\n\t </#if><#-- if MATCHES -->\n </#list>\n,{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_stati\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Stati\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true\n },{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_regioni\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Regioni\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true\n },{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"confini_mondiali_provincie\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Province\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n 
\"displayInLayerSwitcher\": true\n },{\n \"format\":\"image/png\",\n \"group\": \"Limiti Mondiali\",\n \"name\":\"comuni\",\n \"selected\":false, \n \"source\":\"LaMMA_confini\", \n \"styles\":[\"confini\"],\n \"style\":[\"confini\"],\n \"title\":\"Comuni Italia\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"queryable\": false,\n \"displayInLayerSwitcher\": true\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"temparia_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"temperatura\"],\n \"style\":[\"temperatura\"],\n \"title\":\"Temperatura (\\u00b0C) freq. oraria\",\n \"transparent\":true,\n \"visibility\":true,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"temparia_web\",\n \"graphAttribute\": \"temp_c\",\n \"cumulative\": false, \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"umid_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"umidita\"],\n \"style\":[\"umidita\"],\n \"title\":\"Umidit\\u00e0 relativa (%) freq. oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"umid_web\",\n \"graphAttribute\": \"umid_per\", \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"umid_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"temperatura_rug\"],\n \"style\":[\"temperatura_rug\"],\n \"title\":\"Temperatura di rugiada (\\u00b0C) freq. 
oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"umid_web\",\n \"graphAttribute\": \"trug_c\", \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"vent_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"vento\"],\n \"style\":[\"vento\"],\n \"title\":\"Vento - velocit\\u00e0 (m/s) e direzione (0 - 360) freq. oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"vent_web\",\n \"graphAttribute\": \"vven_ms\",\n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"pres_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"pressione\"],\n \"style\":[\"pressione\"],\n \"title\":\"Pressione s.l.m. (hPa) - freq. oraria\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"pres_web\",\n \"graphAttribute\": \"pres_hpa\",\n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"prec360_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"heatmap_pioggia\"],\n \"style\":[\"heatmap_pioggia\"],\n \"title\":\"Pioggia cum. 
6 h (mm)\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"prec360_web\",\n \"graphAttribute\": \"prec_mm\",\n \"cumulative\": true, \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"prec60_web\",\n \"opacity\":0.9,\n \"buffer\": 2,\n \"selected\":false,\n \"tiled\":false,\n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"heatmap_pioggia\"],\n \"style\":[\"heatmap_pioggia\"],\n \"title\":\"Pioggia cum. 1 h (mm)\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"prec60_web\",\n \"graphAttribute\": \"prec_mm\",\n \"cumulative\": true, \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n },{\n \"format\":\"image/png8\",\n \"group\":\"Stazioni\",\n \"name\":\"prec15_web\",\n \"opacity\":0.9,\n \"selected\":false,\n \"tiled\":false, \n \"source\":\"LaMMA_Stazioni\", \n \"styles\":[\"heatmap_pioggia\"],\n \"style\":[\"heatmap_pioggia\"],\n \"title\":\"Pioggia cum. 
15min (mm)\",\n \"transparent\":true,\n \"visibility\":false,\n \"ratio\":1,\n \"srs\":\"EPSG:900913\",\n \"getGraph\": true,\n \"graphTable\": \"prec15_web\",\n \"graphAttribute\": \"prec_mm\",\n \"cumulative\": true, \n \"elevation\":\"0.0\",\n \"restrictedExtent\": [1075735.7826,5192220.48427,1381771.78301,5538868.79933]\n }\n ],\n \"maxExtent\":[\"-20037508.34\",\"-20037508.34\",\"20037508.34\",\"20037508.34\"],\n \"maxResolution\": 156543.0339,\n \"projection\":\"EPSG:900913\",\n \"displayProjection\":\"EPSG:900913\",\n \"units\":\"m\",\n \"zoom\":5\n },\n \"modified\":false,\n \"proxy\":\"/proxy/?url=\",\n \"renderToTab\":\"appTabs\",\n \"sources\":{\n <#if event[0]??>\"${event[0].WORKSPACE}\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA ${event[0].WORKSPACE}\",\n \"layerBaseParams\":{\n \"TILED\":true,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://geoportale.lamma.rete.toscana.it/geoserver/${event[0].WORKSPACE}/ows\"\n },</#if>\n \"LaMMA_Stazioni\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Stazioni\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://geoportale.lamma.rete.toscana.it/geoserver/lamma_stazioni/ows\"\n },\n \"LaMMA_confini\":{\n \"ptype\": \"gxp_wmssource\",\n \"title\":\"LaMMA Confini\",\n \"layerBaseParams\":{\n \"TILED\":false,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://geoportale.lamma.rete.toscana.it/geowebcache/service/wms\"\n },\n \"MSG2\":{\n \"ptype\":\"gxp_wmssource\",\n \"title\":\"LaMMA MSG2\",\n \"layerBaseParams\":{\n \"TILED\":true,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n \"url\":\"http://geoportale.lamma.rete.toscana.it/geoserver/MSG2/ows\"\n },\n \"MSG3\":{\n \"ptype\":\"gxp_wmssource\",\n \"title\":\"LaMMA MSG3\",\n \"layerBaseParams\":{\n \"TILED\":true,\n \"TILESORIGIN\":\"-20037508.34,-20037508.34\"\n },\n 
\"url\":\"http://geoportale.lamma.rete.toscana.it/geoserver/MSG3/ows\"\n },\n \"bing\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_bingsource\"\n },\n \"google\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_googlesource\"\n },\n \"mapquest\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_mapquestsource\"\n },\n \"ol\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_olsource\"\n },\n \"osm\":{\n \"projection\":\"EPSG:900913\",\n \"ptype\":\"gxp_osmsource\"\n }\n },\n \"tools\":[\n {\n \"outputConfig\":{\n \"id\":\"layertree\"\n },\n \"outputTarget\":\"tree\",\n \"ptype\":\"gxp_layertree\"\n },\n {\n \"legendConfig\":{\n \"defaults\":{\n \"baseParams\":{\n \"FORMAT\":\"image/png\",\n \"HEIGHT\":12,\n \"LEGEND_OPTIONS\":\"forceLabels:on;fontSize:10\",\n \"WIDTH\":12\n },\n \"style\":\"padding:5px\"\n },\n \"legendPanelId\":\"legendPanel\"\n },\n \"outputConfig\":{\n \"autoScroll\":true,\n \"title\":\"Show Legend\"\n },\n \"outputTarget\":\"legend\",\n \"ptype\":\"gxp_legend\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_addlayers\",\n \"upload\":true\n },\n {\n \"actionTarget\":[\n \"tree.tbar\",\n \"layertree.contextMenu\"\n ],\n \"ptype\":\"gxp_removelayer\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_removeoverlays\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_addgroup\"\n },\n {\n \"actionTarget\":\"tree.tbar\",\n \"ptype\":\"gxp_removegroup\"\n },\n {\n \"actionTarget\":[\n \"tree.tbar\"\n ],\n \"ptype\":\"gxp_groupproperties\"\n },\n {\n \"actionTarget\":[\n \"tree.tbar\",\n \"layertree.contextMenu\"\n ],\n \"ptype\":\"gxp_layerproperties\"\n },\n {\n \"actionTarget\":{\n \"index\":0,\n \"target\":\"layertree.contextMenu\"\n },\n \"ptype\":\"gxp_zoomtolayerextent\"\n },\n {\n \"actionTarget\":[\n \"layertree.contextMenu\"\n ],\n \"ptype\":\"gxp_geonetworksearch\"\n },\n {\n \"actionTarget\":{\n \"index\":15,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_navigation\",\n 
\"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":7,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_wmsgetfeatureinfo\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":12,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_measure\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":20,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoom\"\n },\n {\n \"actionTarget\":{\n \"index\":24,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoombox\",\n \"toggleGroup\":\"toolGroup\"\n },\n {\n \"actionTarget\":{\n \"index\":22,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_navigationhistory\"\n },\n {\n \"actionTarget\":{\n \"index\":26,\n \"target\":\"paneltbar\"\n },\n \"ptype\":\"gxp_zoomtoextent\"\n },\n {\n \"actionTarget\":{\n \"index\":40,\n \"target\":\"paneltbar\"\n },\n \"needsAuthorization\":true,\n \"ptype\":\"gxp_saveDefaultContext\"\n },\n {\n \"actionTarget\":{\n \"index\":4,\n \"target\":\"paneltbar\"\n },\n \"customParams\":{\n \"outputFilename\":\"fdh-print\"\n },\n \"legendPanelId\":\"legendPanel\",\n \"printService\":\"http://demo1.geo-solutions.it/geoserver/pdf/\",\n \"ptype\":\"gxp_print\"\n }, {\n \"ptype\":\"gxp_playback\"\n }\n ],\n \"viewerTools\":[\n\n ],\n \"xmlJsonTranslateService\":\"http://demo1.geo-solutions.it/xmlJsonTranslate/\"\n}" }, { "alpha_fraction": 0.64359050989151, "alphanum_fraction": 0.6485772728919983, "avg_line_length": 31.778846740722656, "blob_id": "bb2e33231926ab267bb0db1f5be827c6f7c31ffb", "content_id": "a1850141e58e7915f1e526e624d830d375bb4c30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3409, "license_type": "no_license", "max_line_length": 102, "num_lines": 104, "path": "/geobatch/netcdf2geotiff/src/main/java/it/geosolutions/geobatch/metocs/netcdf2geotiff/MetocsDictionary.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\n * GeoBatch - Open Source 
geospatial batch processing system\n * http://geobatch.codehaus.org/\n * Copyright (C) 2007-2008-2009 GeoSolutions S.A.S.\n * http://www.geo-solutions.it\n *\n * GPLv3 + Classpath exception\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http://www.gnu.org/licenses/>.\n */\npackage it.geosolutions.geobatch.metocs.netcdf2geotiff;\n\nimport java.util.Map;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n/**\n * \n * @author Carlo Cancellieri - [email protected]\n *\n * @param <DICTIONARY_KEY> Type of the dictionary key into a dictionary (\n * @param <SECTION> Type of the section key (the type of the section instance extending Map<?,String>)\n */\npublic abstract class MetocsDictionary <DICTIONARY_KEY, SECTION extends Map<?, String>> {\n\tprivate final Logger LOGGER;\n\t\n private final Map<DICTIONARY_KEY, SECTION > dictionary;\n\n @SuppressWarnings(\"unused\")\n\tprivate MetocsDictionary() {\n super();\n this.dictionary = null;\n LOGGER=null;\n }\n\n public MetocsDictionary(Map<DICTIONARY_KEY, SECTION > dictionary) {\n super();\n this.dictionary = dictionary;\n LOGGER=LoggerFactory.getLogger(MetocsDictionary.class);\n }\n\n /**\n * returns a Section of the dictionary\n * @param key\n * @return\n */\n public SECTION getVal(final String key) {\n return dictionary.get(key);\n }\n\n protected Map<DICTIONARY_KEY, SECTION> getDictionary() {\n return dictionary;\n }\n \n /**\n * Search 
into the dictionary the key passed in 'key' parameter first trying to search into the\n * section passed into the 'section' parameter then, if not found, trying to search at the ROOT\n * section.\n * \n * @note can return null.\n * @note avoid call this method using ROOT_KEY as section, for that use\n * getValueFromRootDictionary\n * @param section\n * @param key\n * @return\n */\n public String getValueFromDictionary(final String section, final String key) {\n // search into the dictionary at variable section\n final SECTION varDictionary = getVal(section);\n String name=null;\n if (varDictionary != null) {\n name= varDictionary.get(key);\n }\n if (name==null){\n if (LOGGER.isWarnEnabled())\n LOGGER.warn(\"Unable to find into the dictionary a section with key value: \\'\"\n + section + \"\\'. Trying to search for the key: \\'\" + key\n + \"\\' at ROOT level...\");\n // search into the dictionary ROOT section\n name = getValueFromRootDictionary(key);\n }\n return name;\n }\n \n /**\n * Method to implement to implement a dictionary\n * @param key\n * @return\n */\n public abstract String getValueFromRootDictionary(final String key);\n\n}\n" }, { "alpha_fraction": 0.6541570425033569, "alphanum_fraction": 0.6541570425033569, "avg_line_length": 33.63999938964844, "blob_id": "b2e040dd5ad762b1c3b147e6c1173a2f3802393d", "content_id": "553886ef6ea41b71604fe7514454494fc9e643d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1732, "license_type": "no_license", "max_line_length": 101, "num_lines": 50, "path": "/geobatch/lamma/src/main/java/it/geosolutions/geobatch/lamma/geostore/GSBlob.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "package it.geosolutions.geobatch.lamma.geostore;\n\nimport org.codehaus.jettison.json.JSONArray;\nimport org.codehaus.jettison.json.JSONException;\nimport org.codehaus.jettison.json.JSONObject;\n\npublic class GSBlob {\n\n public static JSONArray 
getLayers(String blob) throws JSONException{\n if (blob==null || blob.isEmpty()){\n throw new IllegalArgumentException(\"Unable to get layers using a null blob string\");\n }\n JSONObject jsonObj = GSBlob.getBlob(blob);\n JSONObject jsonMap = GSBlob.getMap(jsonObj);\n if (jsonMap != null) {\n return GSBlob.getLayers(jsonMap);\n }\n return null;\n }\n \n public static JSONArray getLayers(JSONObject jsonMap) throws JSONException{\n if (jsonMap==null){\n throw new IllegalArgumentException(\"Unable to get layers using a null jsonMap object\");\n }\n return jsonMap.getJSONArray(\"layers\");\n }\n \n public static void putLayers(JSONObject jsonMap, JSONArray layers) throws JSONException{\n if (jsonMap==null){\n throw new IllegalArgumentException(\"Unable to get layers using a null jsonMap object\");\n }\n jsonMap.put(\"layers\", layers);\n }\n \n public static JSONObject getMap(JSONObject blob) throws JSONException, IllegalArgumentException {\n if (blob==null){\n throw new IllegalArgumentException(\"Unable to getMap using a null blob object\");\n }\n final JSONObject jsonMap = blob.getJSONObject(\"map\");\n if (jsonMap != null) {\n return jsonMap;\n }\n return null;\n }\n \n public static JSONObject getBlob(String blob) throws JSONException{\n return new JSONObject(blob);\n }\n \n}\n" }, { "alpha_fraction": 0.6485769152641296, "alphanum_fraction": 0.6496092081069946, "avg_line_length": 40.601226806640625, "blob_id": "4104d2fcd36917c2610108b237c3d26358135547", "content_id": "211a7d4b20207975b08e4460e2af7f839b09ffe5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6781, "license_type": "no_license", "max_line_length": 236, "num_lines": 163, "path": "/geobatch/lamma/src/main/java/it/geosolutions/geobatch/lamma/models/ModelsUtils.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "package it.geosolutions.geobatch.lamma.models;\n\nimport 
it.geosolutions.filesystemmonitor.monitor.FileSystemEvent;\nimport it.geosolutions.geobatch.action.scripting.ScriptingAction;\nimport it.geosolutions.geobatch.action.scripting.ScriptingConfiguration;\nimport it.geosolutions.geobatch.flow.event.ProgressListenerForwarder;\nimport it.geosolutions.geobatch.flow.event.action.Action;\nimport it.geosolutions.geobatch.flow.event.action.ActionException;\nimport it.geosolutions.geobatch.imagemosaic.ImageMosaicOutput;\nimport it.geosolutions.geobatch.lamma.geonetwork.GeoNetworkUtils;\nimport it.geosolutions.geobatch.lamma.geostore.GeoStoreUtils;\nimport it.geosolutions.geobatch.lamma.misc.ImageMosaicUtils;\nimport it.geosolutions.geostore.services.rest.GeoStoreClient;\nimport it.geosolutions.tools.freemarker.filter.FreeMarkerFilter;\n\nimport java.io.File;\nimport java.util.Collection;\nimport java.util.List;\nimport java.util.Map;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic class ModelsUtils {\n \n public static String resolveResourceName(Collection<Map> rootList){\n String storeName=(String)rootList.iterator().next().get(ImageMosaicOutput.STORENAME);\n return resolveResourceName(storeName);\n }\n \n public static String resolveResourceName(String storeName){\n return storeName.substring(storeName.lastIndexOf(\"_\")+1);\n }\n\n /**\n * Script Main \"execute\" function\n **/\n public static Map GeoNetworkAndGeoStore(Map argsMap) throws Exception {\n\n final ScriptingConfiguration configuration = (ScriptingConfiguration)argsMap\n .get(ScriptingAction.CONFIG_KEY);\n \n boolean failIgnore = true;\n // TODO\n if (!configuration.isFailIgnored()) {\n failIgnore = false;\n }\n \n final Map cfgProperties = configuration.getProperties();\n \n final File tempDir = (File)argsMap.get(ScriptingAction.TEMPDIR_KEY);\n final File configDir = (File)argsMap.get(ScriptingAction.CONFIGDIR_KEY);\n final ProgressListenerForwarder listenerForwarder = (ProgressListenerForwarder)argsMap\n 
.get(ScriptingAction.LISTENER_KEY);\n final List<FileSystemEvent> events = (List)argsMap.get(ScriptingAction.EVENTS_KEY);\n\n listenerForwarder.setTask(\"Loading metadata map\");\n listenerForwarder.progressing();\n\n Map<File, Map> rootList = ImageMosaicUtils.loadMetadataMap(events);\n\n listenerForwarder.setTask(\"Publishing to GeoNetwork\");\n listenerForwarder.progressing();\n\n GeoNetworkUtils.publishOnGeoNetworkAction(listenerForwarder,failIgnore,tempDir,configDir, rootList, argsMap, cfgProperties);\n\n listenerForwarder.setTask(\"Publishing to GeoStore\");\n listenerForwarder.progressing();\n\n argsMap = ModelsUtils.publishOnGeoStoreAction(listenerForwarder,failIgnore, rootList.values(), argsMap, cfgProperties, configDir);\n\n listenerForwarder.setTask(\"Publishing complete\");\n listenerForwarder.progressing();\n listenerForwarder.completed();\n \n listenerForwarder.completed();\n return argsMap;\n }\n\n /**\n * @param rootList list of map (datamodels)\n * @param argsMap\n * @return\n * @throws Exception\n */\n /**\n * @param rootList list of map (datamodels)\n * @param argsMap\n * @return\n * @throws Exception\n */\n public static Map publishOnGeoStoreAction(final ProgressListenerForwarder listenerForwarder , final boolean failIgnore, final Collection<Map> rootList , final Map argsMap, final Map cfgProps, final File configDir) throws Exception {\n \n // set workspace\n String workspace = (String)cfgProps.get(GeoStoreUtils.WORKSPACE);\n if (workspace == null) {\n throw new ActionException(Action.class, \"Unable to continue without a \" + GeoStoreUtils.WORKSPACE\n + \" defined, please check your configuration\");\n }\n \n final Logger logger = LoggerFactory\n .getLogger(GeoStoreUtils.class);\n \n // GEOSTORE\n // ////////////////////////////////////////////////////////////////\n \n final String gstTemplateName = (String)cfgProps.get(GeoStoreUtils.GST_METADATA_TEMPLATE);\n if (gstTemplateName == null)\n throw new IllegalArgumentException(\"The key \" + 
GeoStoreUtils.GST_METADATA_TEMPLATE\n + \" property is not set, please fix the configuration.\");\n final FreeMarkerFilter gstFilter = new FreeMarkerFilter(new File(configDir, gstTemplateName));\n \n \n \n String gstUrl = (String)cfgProps.get(GeoStoreUtils.GSTURL);\n if (gstUrl == null) {\n gstUrl = \"http://localhost:8383/geostore/rest/\";\n }\n String gstUsr = (String)cfgProps.get(GeoStoreUtils.GSTUID);\n if (gstUsr == null) {\n gstUsr = \"admin\";\n }\n String gstPwd = (String)cfgProps.get(GeoStoreUtils.GSTPWD);\n if (gstPwd == null) {\n gstPwd = \"admin\";\n }\n // init geostore parameter connection\n final GeoStoreClient geostore = new GeoStoreClient();\n geostore.setGeostoreRestUrl(gstUrl);\n geostore.setUsername(gstUsr);\n geostore.setPassword(gstPwd);\n \n // GeoStore\n try {\n final String gstLayerTemplateName = (String)cfgProps.get(GeoStoreUtils.GST_LAYER_TEMPLATE);\n if (gstLayerTemplateName == null) {\n // use workspace as GeoStore Category and resource\n GeoStoreUtils.publishOnGeoStore(logger, gstFilter, geostore, rootList, workspace,resolveResourceName(rootList));\n } else {\n final FreeMarkerFilter gstLayerFilter = new FreeMarkerFilter(new File(configDir,\n gstLayerTemplateName));\n // use workspace as GeoStore Category and resource\n GeoStoreUtils.publishAndUpdateOnGeoStore(logger, gstFilter, gstLayerFilter, geostore, rootList, workspace,resolveResourceName(rootList));\n }\n } catch (Exception e) {\n if (failIgnore) {\n if (logger.isErrorEnabled()) {\n logger.error(e.getLocalizedMessage(), e);\n }\n } else {\n if (logger.isErrorEnabled()) {\n logger.error(e.getLocalizedMessage(), e);\n }\n ActionException ae = new ActionException(Action.class, e.getLocalizedMessage(), e.getCause());\n listenerForwarder.failed(ae);\n throw ae;\n }\n }\n \n listenerForwarder.completed();\n return argsMap;\n }\n}\n" }, { "alpha_fraction": 0.648775041103363, "alphanum_fraction": 0.6586247682571411, "avg_line_length": 28.433027267456055, "blob_id": 
"b2e55fd066edb8e078af5515b63e92e1c28a9ae5", "content_id": "6da067af8ff981568fe2a2c4401280fc3360b52b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 16041, "license_type": "no_license", "max_line_length": 98, "num_lines": 545, "path": "/geobatch/lamma/src/main/java/it/geosolutions/geobatch/lamma/misc/MergeImageUtils.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\n * GeoBatch - Open Source geospatial batch processing system\n * http://code.google.com/p/geobatch/\n * Copyright (C) 2007-2011 GeoSolutions S.A.S.\n * http://www.geo-solutions.it\n *\n * GPLv3 + Classpath exception\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n */\npackage it.geosolutions.geobatch.lamma.misc;\n\nimport it.geosolutions.geobatch.metocs.utils.io.Utilities;\nimport it.geosolutions.imageioimpl.plugins.tiff.TIFFImageReaderSpi;\n\nimport java.awt.image.BufferedImage;\nimport java.awt.image.ColorModel;\nimport java.awt.image.DataBuffer;\nimport java.awt.image.RenderedImage;\nimport java.awt.image.SampleModel;\nimport java.awt.image.WritableRaster;\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.List;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport javax.imageio.ImageIO;\nimport javax.imageio.ImageReader;\nimport javax.imageio.stream.FileImageInputStream;\nimport javax.media.jai.PlanarImage;\nimport javax.media.jai.RasterFactory;\nimport javax.media.jai.TiledImage;\n\nimport org.geotools.coverage.CoverageFactoryFinder;\nimport org.geotools.coverage.grid.GridCoverage2D;\nimport org.geotools.coverage.grid.GridCoverageFactory;\nimport org.geotools.coverage.grid.io.AbstractGridFormat;\nimport org.geotools.factory.Hints;\nimport org.geotools.gce.geotiff.GeoTiffFormatFactorySpi;\nimport org.geotools.gce.geotiff.GeoTiffReader;\nimport org.geotools.geometry.GeneralEnvelope;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n/**\n * \n * @author Carlo Cancellieri - [email protected]\n * \n */\npublic class MergeImageUtils {\n\tprotected final static Logger LOGGER = LoggerFactory\n\t\t\t.getLogger(MergeImageUtils.class);\n\n\tprivate final GeoTiffFormatFactorySpi GEOTIFF_FORMAT_FACTORY_SPI = new GeoTiffFormatFactorySpi();\n\t// ImageIO\n\tprivate final TIFFImageReaderSpi TIFF_READER_SPI = new TIFFImageReaderSpi();\n\n\tprivate final Pattern pattern;\n\tprivate final String compressionType = \"LZW\";\n\tprivate final float compressionRatio = 0.7f;\n\tprivate final int tileSize = 512;\n\n\t// from the model\n\tprivate boolean inittedModel;\n\tprivate GeneralEnvelope envelope;\n\tprivate Double nodata; // TODO short uint 
???\n\tprivate int iHeight;\n\tprivate int iWidth;\n\n\tpublic MergeImageUtils(final String regex) {\n\t\tpattern = Pattern.compile(regex);\n\t}\n\n\tpublic File mergeMaskImage(final File outDir, final List<File> files,\n\t\t\tfinal String outVarName) throws Throwable {\n\n\t\t// the first file is used as model for others in the list\n\t\tfinal File model = files.get(0);\n\n\t\tif (!inittedModel)\n\t\t\tinitModel(model);\n\n\t\t// build an in memory image using the files list\n\t\tfinal RenderedImage tiledImage = mergeImage(files);\n\n\t\t// exclude first part of the variable (name) substituting varName\n\t\tfinal String name = buildName(outVarName, model.getName());\n\n\t\treturn writeGeotiff(outDir, name, tiledImage, envelope);\n\t}\n\n\tprivate String buildName(final String nameVar, final String name) {\n\t\tfinal Matcher matcher = pattern.matcher(name);\n\t\tif (matcher != null && matcher.matches()) {\n\t\t\treturn nameVar + \"_\" + matcher.group(1);\n\t\t} else\n\t\t\treturn nameVar + \"_\" + name;\n\t}\n\n\t/**\n\t * Merge all the passed images into a\n\t * \n\t * @param files\n\t * @return\n\t * @throws IllegalArgumentException\n\t */\n\tpublic final RenderedImage mergeImage(final List<File> files)\n\t\t\tthrows IllegalArgumentException {\n\t\tfinal int listSize = files.size();\n\t\tfinal ImageCursor[] cursors = new ImageCursor[listSize];\n\n\t\t// Create a DataBuffer from the values on the image array.\n\t\t// define the type of the image DataModel\n\t\tSampleModel sampleModel = null;\n\t\tif (listSize < 8 && listSize >= 0) {\n\t\t\t// Create a float data sample model.\n\t\t\tsampleModel = RasterFactory.createBandedSampleModel(\n\t\t\t\t\tDataBuffer.TYPE_BYTE, 256, 256, 1);\n\t\t} else if (listSize >= 8 && listSize < 16) {\n\t\t\t// Create a float data sample model.\n\t\t\tsampleModel = RasterFactory.createBandedSampleModel(\n\t\t\t\t\tDataBuffer.TYPE_USHORT, 256, 256, 1);\n\t\t} else {\n\t\t\t// unsupported\n\t\t\tif 
(LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\"MergeImageUtils: ERROR unsupported data type\");\n\t\t\treturn null;\n\t\t}\n\n\t\t// Create a compatible ColorModel.\n\t\tColorModel colorModel = PlanarImage.createColorModel(sampleModel);\n\n\t\t// Create a TiledImage using the float SampleModel.\n\t\tTiledImage tiledImage = new TiledImage(0, 0, iWidth, iHeight, 0, 0,\n\t\t\t\tsampleModel, colorModel);\n\t\ttry {\n\t\t\t// fill in the array\n\t\t\tfor (int h = 0; h < iHeight; h++) {\n\t\t\t\tfor (int w = 0; w < iWidth; w++) {\n\t\t\t\t\tint setValue = -1; // TODO SET TO NAN???\n\t\t\t\t\tfor (int i = 0; i < listSize; i++) {\n\t\t\t\t\t\tif (cursors[i] == null) {\n\t\t\t\t\t\t\tcursors[i] = new ImageCursor();\n\t\t\t\t\t\t\tif (!cursors[i].isInitted()) {\n\t\t\t\t\t\t\t\tif (!cursors[i]\n\t\t\t\t\t\t\t\t\t\t.init(files.get(i), Double.class)) {\n\t\t\t\t\t\t\t\t\t// ERROR!!!!\n\t\t\t\t\t\t\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\t\t\t\t\t\t\tLOGGER.error(\"ERROR for file n \" + i\n\t\t\t\t\t\t\t\t\t\t\t\t+ \" Named: \" + files.get(i));\n\t\t\t\t\t\t\t\t\treturn null;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfinal Number value = cursors[i].getNext();\n\t\t\t\t\t\tif (value.equals(nodata)) {\n\t\t\t\t\t\t\t// System.out.println(\"NOT NaN for file n \"+i+\" value is: \\'\"+value+\"\\'\");\n\t\t\t\t\t\t\tsetValue = nodata.intValue();\n\t\t\t\t\t\t} else if (value.equals(0)) {\n\t\t\t\t\t\t\t// System.out.println(\"VALUE: \\'\"+value+\"\\' for file n.\"+i+\" Named: \"+files.get(i));\n\t\t\t\t\t\t\tsetValue = 0;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tsetValue = i;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttiledImage.setSample(w, h, 0, setValue);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (Throwable t) {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\n\t\t\t\t\t\t\"MergeImageUtils: ERROR: \\'\" + t.getLocalizedMessage()\n\t\t\t\t\t\t\t\t+ \"\\'\", t);\n\t\t} finally {\n\t\t\t// close streams\n\t\t\tif (cursors != null) {\n\t\t\t\tfor (int i = 0; i < 
listSize; i++) {\n\t\t\t\t\tif (cursors[i] != null && !cursors[i].isInitted()) {\n\t\t\t\t\t\tcursors[i].dispose();\n\t\t\t\t\t\tcursors[i] = null;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// //////////////////////////////////////////////\n\t\t// TODO\n\t\treturn tiledImage;\n\t}\n\n\t/**\n\t * Read the model image file initializing all the data and metadata members\n\t * of this class\n\t * \n\t * @param file\n\t * @return\n\t * @throws IOException\n\t * , IllegalArgumentException\n\t */\n\tprivate final void initModel(final File model) throws Throwable,\n\t\t\tIllegalArgumentException {\n\t\tinittedModel = false;\n\t\t// reading metadata from the model image\n\n\t\t// using geotools\n\t\tGeoTiffReader reader = null;\n\t\ttry {\n\t\t\tfinal AbstractGridFormat format = GEOTIFF_FORMAT_FACTORY_SPI\n\t\t\t\t\t.createFormat();\n\t\t\tif (format.accepts(model)) {\n\t\t\t\t/*\n\t\t\t\t * the reader can decode input let's take a reader on it\n\t\t\t\t */\n\t\t\t\treader = (GeoTiffReader) format.getReader(model);\n\t\t\t\tif (reader == null) {\n\t\t\t\t\tfinal IOException ioe = new IOException(\n\t\t\t\t\t\t\t\"Unable to find a reader for the provided file: \"\n\t\t\t\t\t\t\t\t\t+ model.getAbsolutePath());\n\t\t\t\t\tthrow ioe;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfinal IllegalArgumentException iae = new IllegalArgumentException(\n\t\t\t\t\t\t\"Unable to get a reader for the image file: \"\n\t\t\t\t\t\t\t\t+ model.getAbsolutePath());\n\t\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\t\tLOGGER.error(\n\t\t\t\t\t\t\t\"MergeImageUtils: ERROR: \\'\"\n\t\t\t\t\t\t\t\t\t+ iae.getLocalizedMessage() + \"\\'\", iae);\n\t\t\t\tthrow iae;\n\t\t\t}\n\t\t\t// final CoordinateReferenceSystem crs=reader.getCrs();\n\t\t\tenvelope = reader.getOriginalEnvelope();\n\t\t\tnodata = reader.getMetadata().getNoData();\n\t\t} catch (Throwable t) {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\n\t\t\t\t\t\t\"MergeImageUtils: ERROR: \\'\" + t.getLocalizedMessage()\n\t\t\t\t\t\t\t\t+ 
\"\\'\", t);\n\t\t\tthrow t;\n\t\t} finally {\n\t\t\ttry {\n\t\t\t\tif (reader != null)\n\t\t\t\t\treader.dispose();\n\t\t\t} catch (Throwable t) {\n\t\t\t}\n\t\t}\n\n\t\t// using ImageIO\n\t\tFileImageInputStream stream = null;\n\t\ttry {\n\t\t\tstream = new FileImageInputStream(model);\n\t\t\tImageReader reader2 = null;\n\t\t\ttry {\n\n\t\t\t\tif (TIFF_READER_SPI.canDecodeInput(stream)) {\n\t\t\t\t\t/*\n\t\t\t\t\t * the reader can decode input let's take a reader on it\n\t\t\t\t\t */\n\t\t\t\t\treader2 = TIFF_READER_SPI.createReaderInstance();\n\t\t\t\t\treader2.setInput(stream);\n\t\t\t\t\t// reader = (ImageReader) new TIFFImageReader(new\n\t\t\t\t\t// TIFFImageReaderSpi());\n\t\t\t\t} else\n\t\t\t\t\tthrow new IllegalArgumentException(\n\t\t\t\t\t\t\t\"Unable to get a reader for the image file: \"\n\t\t\t\t\t\t\t\t\t+ model.getAbsolutePath()); // TODO log\n\n\t\t\t\tfinal int index = reader2.getMinIndex();\n\t\t\t\t// final Iterator<ImageTypeSpecifier> it = reader2\n\t\t\t\t// .getImageTypes(index);\n\t\t\t\t//\n\t\t\t\t// if (it.hasNext()){\n\t\t\t\t// final ImageTypeSpecifier imgTypeSpec= it.next();\n\t\t\t\t// imgTypeSpec.getColorModel().getAlpha(pixel);\n\t\t\t\t// }\n\n\t\t\t\tiHeight = reader2.getHeight(index);\n\t\t\t\tiWidth = reader2.getWidth(index);\n\n\t\t\t} catch (IOException e) {\n\t\t\t\tthrow e; // TODO log\n\t\t\t} finally {\n\t\t\t\ttry {\n\t\t\t\t\tif (reader2 != null)\n\t\t\t\t\t\treader2.dispose();\n\t\t\t\t} catch (Throwable t) {\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (IOException e) {\n\t\t\tthrow e; // TODO log\n\t\t} finally {\n\t\t\ttry {\n\t\t\t\tif (stream != null)\n\t\t\t\t\tstream.close();\n\t\t\t} catch (IOException ioe) {\n\t\t\t}\n\t\t}\n\t\tinittedModel = true;\n\t}\n\n\tpublic final File writeGeotiff(final File outDir, final String name,\n\t\t\tfinal RenderedImage image, final GeneralEnvelope envelope)\n\t\t\tthrows IllegalArgumentException, IOException {\n\t\tfinal Hints hints = new Hints(Hints.TILE_ENCODING, \"raw\");\n\t\tfinal 
GridCoverageFactory factory = CoverageFactoryFinder\n\t\t\t\t.getGridCoverageFactory(hints);\n\n\t\tfinal GridCoverage2D coverage = factory.create(name, image, envelope);\n\n\t\treturn Utilities.storeCoverageAsGeoTIFF(outDir, name, coverage,\n\t\t\t\tcompressionType, compressionRatio, tileSize);\n\n\t}\n\n\t// /////////////////////////////////////////////\n\t// WIND SPECIFIC\n\t// /////////////////////////////////////////////\n\n\tpublic File mergeWindImage(final File outDir, final File fileU,\n\t\t\tfinal File fileV, final String outVarName) throws Throwable {\n\n\t\t// the first file is used as model for others in the list\n\t\tfinal File model = fileU;\n\n\t\tif (!inittedModel)\n\t\t\tinitModel(model);\n\n\t\t// build an in memory image using the files list\n\t\tfinal RenderedImage tiledImage = mergeWindComponents(fileU, fileV);\n\n\t\t// exclude first part of the variable (name) substituting varName\n\t\tfinal String name = buildName(outVarName, model.getName());\n\n\t\treturn writeGeotiff(outDir, name, tiledImage, envelope);\n\t}\n\n\t/**\n\t * Merge all the passed images into a\n\t * \n\t * @param files\n\t * @return\n\t * @throws IllegalArgumentException\n\t */\n\tpublic final RenderedImage mergeWindComponents(final File fileU,\n\t\t\tfinal File fileV) throws IllegalArgumentException {\n\n\t\tfinal ImageCursor cursorU = new ImageCursor();\n\t\tfinal ImageCursor cursorV = new ImageCursor();\n\n\t\t// Create a DataBuffer from the values on the image array.\n\t\t// Create a double sample model on 2 bands\n\t\tfinal SampleModel sampleModel = RasterFactory.createBandedSampleModel(\n\t\t\t\tDataBuffer.TYPE_DOUBLE, 256, 256, 2);\n\n\t\t// Create a compatible ColorModel.\n\t\tfinal ColorModel colorModel = PlanarImage.createColorModel(sampleModel);\n\n\t\t// Create a TiledImage using the float SampleModel.\n\t\tTiledImage tiledImage = new TiledImage(0, 0, iWidth, iHeight, 0, 0,\n\t\t\t\tsampleModel, colorModel);\n\t\t\n\t\ttry {\n\t\t\tif (!cursorU.isInitted()) 
{\n\t\t\t\tif (!cursorU.init(fileU, Double.class)) {\n\t\t\t\t\t// ERROR!!!!\n\t\t\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\t\t\tLOGGER.error(\"ERROR for file Named: \"\n\t\t\t\t\t\t\t\t+ fileU.getAbsolutePath());\n\t\t\t\t\treturn null;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!cursorV.isInitted()) {\n\t\t\t\tif (!cursorV.init(fileV, Double.class)) {\n\t\t\t\t\t// ERROR!!!!\n\t\t\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\t\t\tLOGGER.error(\"ERROR for file Named: \"\n\t\t\t\t\t\t\t\t+ fileV.getAbsolutePath());\n\t\t\t\t\treturn null;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// fill in the array\n\t\t\tfor (int h = 0; h < iHeight; h++) {\n\t\t\t\tfor (int w = 0; w < iWidth; w++) {\n\t\t\t\t\t// band 1\n\t\t\t\t\tfinal double setModule;\n\t\t\t\t\t// band 2\n\t\t\t\t\tfinal double setDirection;\n\n\t\t\t\t\tfinal Number valueU = cursorU.getNext();\n\t\t\t\t\tfinal Number valueV = cursorV.getNext();\n\t\t\t\t\tif (valueU.equals(nodata) || valueV.equals(nodata)) {\n\t\t\t\t\t\t// System.out.println(\"NOT NaN for file n \"+i+\" value is: \\'\"+value+\"\\'\");\n\t\t\t\t\t\t// unable to calculate Wind direction and speed\n\t\t\t\t\t\tsetModule = Double.NaN;\n\t\t\t\t\t\tsetDirection = Double.NaN;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// System.out.println(\"VALUE: \\'\"+value+\"\\' for file n.\"+i+\" Named: \"+files.get(i));\n\t\t\t\t\t\tdouble u=valueU.doubleValue();\n\t\t\t\t\t\tdouble v=valueV.doubleValue();\n\t\t\t\t\t\tsetModule = getModule(u,v);\n\t\t\t\t\t\tsetDirection = getDirection(u,v);\n\t\t\t\t\t}\n\t\t\t\t\ttiledImage.setSample(w, h, 0, setModule);\n\t\t\t\t\ttiledImage.setSample(w, h, 1, setDirection);\n//ImageFrame frame = new ImageFrame(tiledImage,\"IMAGE\");\n\t\t\t\t\t \n\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (Throwable t) {\n\t\t\tif (LOGGER.isErrorEnabled())\n\t\t\t\tLOGGER.error(\n\t\t\t\t\t\t\"MergeImageUtils: ERROR: \\'\" + t.getLocalizedMessage()\n\t\t\t\t\t\t\t\t+ \"\\'\", t);\n\t\t} finally {\n\t\t\t// try {\n\t\t\t// if (tiledImage != null)\n\t\t\t// 
tiledImage.dispose();\n\t\t\t// } catch (Exception e) {\n\t\t\t// }\n\t\t\ttry {\n\t\t\t\t// close streams\n\t\t\t\tif (cursorU != null) {\n\t\t\t\t\tcursorU.dispose();\n\t\t\t\t}\n\t\t\t} catch (Exception e) {\n\t\t\t}\n\t\t\ttry {\n\t\t\t\tif (cursorV != null) {\n\t\t\t\t\tcursorV.dispose();\n\t\t\t\t}\n\t\t\t} catch (Exception e) {\n\t\t\t}\n\t\t}\n\n\t\t// //////////////////////////////////////////////\n\t\t// TODO\n\t\treturn tiledImage;\n\t}\n\n\tpublic double getModule(double valueU, double valueV) {\n\t\treturn Math.sqrt(Math.pow(valueU, 2) + Math.pow(valueV, 2));\n\t}\n\n\t/**\n\t * from: http://mst.nerc.ac.uk/wind_vect_convs.html\n\t * tool to check: http://cactus2000.net/uk/unit/masswin.shtml\n\t * \n\t * @param valueU\n\t * @param valueV\n\t * @return\n\t */\n\tpublic double getDirection(double valueU, double valueV) {\n\t\t\n\t\tfinal double direction=270 - ((180 / Math.PI) * Math.atan2(valueV, valueU));\n\t\tfinal double newDirection;\n\t\t\n\t\tif(direction>360 && direction<=450){\n\t\t\tnewDirection = direction - 360;\n\t\t}else{\n\t\t\tnewDirection = direction;\n\t\t}\n\t\t\n\t\treturn newDirection;\n\t\t/*final double direction=(180 / Math.PI) * Math.atan2(valueU,valueV);\n\t\tfinal double newDirection;\n\t\t\n\t\tif(direction>=0 && direction<=90){\n\t\t\tnewDirection=270-(direction);\n\t\t}else if(direction>90 && direction<=180){\n\t\t\tnewDirection=270-(direction);\n\t\t}else if(direction<0 && direction>=-90){\n\t\t\tnewDirection=270-(direction);\n\t\t}else if(direction>=-180 && direction<-90){\n\t\t\tnewDirection=270-(direction)-360;\n\t\t}else{\n\t\t\tnewDirection=0;\n\t\t}\n\n\t\treturn newDirection;*/\n\t}\n\n\tpublic static void main(String[] args) throws IllegalArgumentException, Throwable {\n\t\t// Store the image using the PNG format.\n\t\tFile imageFile=new File(\"src/main/resources/glpattern.tiff\");\n\t\ttestWriteImage(imageFile);\n\t\tFile imageFile2=new 
File(\"src/main/resources/glpattern2.tiff\");\n\t\ttestWriteImage(imageFile2);\n\t\t\n\t\tMergeImageUtils util=new MergeImageUtils(\".*\");\n//\t\tutil.initModel(imageFile);\n\t\tutil.iHeight=10;\n\t\tutil.iWidth=25;\n\t\tutil.nodata=Double.NaN;\n\t\tRenderedImage image=util.mergeWindComponents(imageFile, imageFile2);\n\t\t\n\t\tImageIO.write(image, \"TIFF\", imageFile);\n\t\t\n\t\tImageCursor ic=new ImageCursor();\n\t\tic.init(imageFile, Integer.class);\n\t\tint val=0;\n\t\twhile (ic.hasNext()){\n\t\t\tval=(Integer)ic.getNext();\n\t\t\tSystem.out.println(val);\n\t\t}\n\t\t\n\t}\n\t\n\t\n\tpublic static void testWriteImage(File imageFile) throws IOException {\n\t\tint width = 25; // Dimensions of the image\n\t\tint height = 10;\n\t\t// Let's create a BufferedImage for a gray level image.\n\t\tBufferedImage im = new BufferedImage(width, height,\n\t\t\t\tBufferedImage.TYPE_BYTE_GRAY);\n\t\t// We need its raster to set the pixels' values.\n\t\tWritableRaster raster = im.getRaster();\n\t\tlong val = 0;\n\t\t// Put the pixels on the raster, using values between 0 and 255.\n\t\tfor (int h = 0; h < height; h++) {\n\t\t\tfor (int w = 0; w < width; w++) {\n\t\t\t\tSystem.out.println(val);\n\t\t\t\traster.setSample(w, h, 0, val++);\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Store the image using the TIFF format.\n\t\tImageIO.write(im, \"TIFF\", imageFile);\n\t\t\n\t}\n}\n" }, { "alpha_fraction": 0.601543664932251, "alphanum_fraction": 0.6052420139312744, "avg_line_length": 38.11320877075195, "blob_id": "ec28968961c98367a9a2819338a74b68a151175f", "content_id": "d22a7fbd716b5369fe0fd3fbb1c588b936c53e8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6219, "license_type": "no_license", "max_line_length": 150, "num_lines": 159, "path": "/geobatch/lamma/src/main/java/it/geosolutions/geobatch/lamma/meteosat/RGBPythonUtils.java", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "/*\n * GeoBatch - Open Source 
geospatial batch processing system\n * http://code.google.com/p/geobatch/\n * Copyright (C) 2007-2012 GeoSolutions S.A.S.\n * http://www.geo-solutions.it\n *\n * GPLv3 + Classpath exception\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http://www.gnu.org/licenses/>.\n */\npackage it.geosolutions.geobatch.lamma.meteosat;\n\nimport it.geosolutions.tools.ant.Task;\nimport it.geosolutions.tools.io.file.Collector;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.List;\nimport java.util.Map;\n\nimport org.apache.commons.io.filefilter.WildcardFileFilter;\nimport org.apache.tools.ant.BuildException;\nimport org.apache.tools.ant.taskdefs.ExecTask;\nimport org.apache.tools.ant.types.Environment.Variable;\nimport org.slf4j.Logger;\n\npublic class RGBPythonUtils {\n\n public static File[] getChannels(File channelsDir, String prefix, Map<String, String> map) throws IllegalArgumentException {\n String wildcardsString = map.get(prefix + RGBPythonUtils.FILTER);\n if (wildcardsString == null)\n throw new IllegalArgumentException(\"The key \" + prefix + RGBPythonUtils.FILTER\n + \" property is not set, please fix the configuration.\");\n \n final String[] wildcards = wildcardsString.split(\",\");\n final int size = wildcards.length;\n final File[] channels = new File[size];\n for (int i = 0; i < size; i++) {\n final Collector coll = new Collector(new WildcardFileFilter(wildcards[i]));\n 
final List<File> filteredFiles = coll.collect(channelsDir);\n if (filteredFiles.size() == 1) {\n channels[i] = filteredFiles.get(0);\n } else {\n throw new IllegalArgumentException(\n \"Please provide a better filter or check the file channel folder. Filtered file list size is !=0. \"\n + filteredFiles.toString());\n }\n }\n return channels;\n }\n\n /**\n * builds args[] array to be used with the RGB python script:<br>\n * ./script.py calc.function() OutFile.tif Channel_0.tif ...<br>\n * \n * @param configDir\n * @param channelsDir\n * @param outFile\n * @param prefix\n * @param map\n * @return\n */\n public static String[] buildArgs(File configDir, File[] channels, File outFile, String prefix,\n Map<String, String> map) {\n \n final String calcFunction = map.get(prefix + RGBPythonUtils.CALC_FUNCTION);\n if (calcFunction == null)\n throw new IllegalArgumentException(\"The key \" + prefix + RGBPythonUtils.CALC_FUNCTION\n + \" property is not set, please fix the configuration.\");\n \n final String scriptFileName = map.get(prefix + RGBPythonUtils.SCRIPT);\n if (scriptFileName == null)\n throw new IllegalArgumentException(\"The key \" + prefix + RGBPythonUtils.SCRIPT\n + \" property is not set, please fix the configuration.\");\n \n final File script = new File(configDir, scriptFileName);\n \n // python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ...\n // Channel_n.tif\n final String[] args = new String[channels.length + 3];\n int i = 0;\n args[i] = script.getPath();\n args[++i] = calcFunction;\n args[++i] = outFile.getPath();\n for (File file : channels) {\n args[++i] = file.getPath();\n }\n return args;\n }\n\n /**\n * used to call:<br>\n * python script outFile inFiles[0] ... 
inFiles[N]\n * \n * @param logger can be null\n * @param script\n * @param tempDir from where the process should be called\n * @param outFile\n * @param inFiles\n * @return true or false\n * @throws IOException\n */\n public static boolean run(Logger logger, File env, File tempDir, String... args) throws IOException {\n \n Variable[] vars = Task.loadVars(env);\n \n // python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ...\n // Channel_n.tif\n ExecTask task = Task.buildTask(\"python\", tempDir, args, vars);\n task.setFailIfExecutionFails(true);\n task.setFailonerror(true);\n // task.setResolveExecutable(true);\n // Project p=Task.buildSimpleProject(\"TASK\", task);\n try {\n // p.executeTarget(\"TASK\");\n task.execute();\n } catch (BuildException e) {\n if (logger != null) {\n if (logger.isDebugEnabled()) {\n logger.error(e.getLocalizedMessage(), e);\n } else if (logger.isErrorEnabled()) {\n logger.error(e.getMessage());\n }\n }\n return false;\n }\n return true;\n }\n\n /**\n * The suffix to add to the prefix to obtain the filter KEY into the map\n * this filter will be used to collect (in the filter order) the files to\n * pass to the script\n */\n public final static String FILTER = \"_FILTER\";\n /**\n * The suffix to add to the prefix to obtain the CALC function KEY into the\n * map this is the function called into the calc.py file for this prefix\n * variable\n */\n public final static String CALC_FUNCTION = \"_CALC\";\n /**\n * The suffix to add to the prefix to obtain the script KEY into the map\n * this is the script called for this prefix variable\n */\n public final static String SCRIPT = \"_SCRIPT\";\n\n}\n" }, { "alpha_fraction": 0.5972602963447571, "alphanum_fraction": 0.6173515915870667, "avg_line_length": 22.826086044311523, "blob_id": "e5df2c205dca2f471c302f34d3033eb3ac5cb239", "content_id": "e7b0e579879b56079bc51c3ea673156bc95a621b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1095, "license_type": "no_license", "max_line_length": 99, "num_lines": 46, "path": "/GEOBATCH_CONFIG_DIR/commons/rimuovi_tif.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "#!/usr/bin/python \n# -*- coding: utf-8 -*-\n# Riccardo Mari\n\nimport os\nimport sys, time\nimport calc\nimport glob\nimport shutil\nimport zipfile\nimport operator\n\ntry: \n from osgeo import gdal\n import numpy\n os.chdir('.')\nexcept ImportError:\n #import gdal\n os.chdir('.')\n\ndef copy_zip(argv):\n\n if (len(sys.argv) == 0):\n print 'Usage:'\n print 'python ./createRGB_2.py calc.function() OutFile.tif Channel_0.tif ... Channel_n.tif'\n\n for image in sys.argv[2:]:\n remote_dir = '/var/www/html/download/' + argv[1].lower()\n newpath_remote_dir = remote_dir+'/'+image\n os.system('ssh [email protected] rm -r -f '+newpath_remote_dir)\n #print newpath_remote_dir\n \n\nret=copy_zip(sys.argv)\n\n#def findNewestDir(directory):\n# os.chdir(directory)\n# dirs = {}\n# for dir in glob.glob('*'):\n# if os.path.isdir(dir):\n# dirs[dir] = os.path.getctime(dir)\n#\n# lister = sorted(dirs.iteritems(), key=operator.itemgetter(1))\n# return lister[-1][0]\n#\n#print \"The newest directory is\", findNewestDir('/opt/data/models/gfs_50km_run00')" }, { "alpha_fraction": 0.5823096036911011, "alphanum_fraction": 0.5872235894203186, "avg_line_length": 24.5, "blob_id": "bec31c4681c2c7186d03dea8a3a2d9519c8a1b1b", "content_id": "591e887fe67bd8f64e9ba599d3e173c867742531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 67, "num_lines": 16, "path": "/GEOBATCH_CONFIG_DIR/commons/prova_funzione.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "import sys, traceback\n\ndef main():\n\n out_file = open(\"/opt/geobatch/conf/commons/pippo.txt\",\"w\")\n for arg in sys.argv: 1\n out_file.write(\"Argomento: \"+arg+\"\\n\")\n out_file.close()\n 
except KeyboardInterrupt:\n print \"Shutdown requested...exiting\"\n except Exception:\n traceback.print_exc(file=sys.stdout)\n sys.exit(0)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.758865237236023, "alphanum_fraction": 0.7695035338401794, "avg_line_length": 38, "blob_id": "1af88874832d9334f65f681bb67f14b923a4bd86", "content_id": "b136b1b7d6595b3e9112e5ce555e3cdb4a4a2d42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/py_examples/produceVIS.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceRGBOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"VIS.tif\"\r\n\r\noutputImage = produceRGBOutputImage(inputDir, baseFileName, \"1\", \"2\", \"3\")\r\noutputImage.save(out_filePath)\r\n\r\n" }, { "alpha_fraction": 0.7569444179534912, "alphanum_fraction": 0.7743055820465088, "avg_line_length": 39.14285659790039, "blob_id": "40250e5f35c5076ce05a1e7199fea41261b4088e", "content_id": "630c2de299172b891f827a5dfcffe18c7111b641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 78, "num_lines": 7, "path": "/py_examples/produceAirmass.py", "repo_name": "geosolutions-it/lamma", "src_encoding": "UTF-8", "text": "from LammaUtils import parseFileLocations, produceRGBOutputImage\r\n\r\ninputDir, out_filePath, baseFileName = parseFileLocations()\r\nout_filePath = out_filePath + \"Airmass.tif\"\r\n\r\noutputImage = produceRGBOutputImage(inputDir, baseFileName, \"5-6\", \"8-9\", \"5\")\r\noutputImage.save(out_filePath)\r\n" } ]
35
rabiumusah/Hello-World
https://github.com/rabiumusah/Hello-World
564bc8a07f8c27469bf445934c15b8fd7380fef2
e0e4f8efc94404d98e871c5c1f7b49bb24f6b207
d18152cdbdf2a183730f011e2da5841970c5aad8
refs/heads/master
2022-04-27T13:11:29.676273
2020-04-29T15:47:31
2020-04-29T15:47:31
259,967,652
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.42424243688583374, "alphanum_fraction": 0.5151515007019043, "avg_line_length": 12.199999809265137, "blob_id": "abd46c7ccbaadeb29d8eecd7dc1ad8437f4ca8a6", "content_id": "f211a77c9005517df6d3ddfccf82bbd58a7b1f66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/2d_array.py", "repo_name": "rabiumusah/Hello-World", "src_encoding": "UTF-8", "text": "import numpy as np\n\nA = np.array([[1,2],[3,4]])\nB = 1.4*A\nC = A+B\n" } ]
1
murungiKirima/plog
https://github.com/murungiKirima/plog
5ce5b754a4defaa42c27441c666052636675fdad
0d669fa0c04defc7c6d6c20d51f34ed310aa6451
cebc972cb8ea580ffe20dff7a57deacd6faef7a9
refs/heads/master
2020-03-18T18:50:10.060091
2018-05-29T15:20:01
2018-05-29T15:20:01
135,117,819
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7032374143600464, "alphanum_fraction": 0.7284172773361206, "avg_line_length": 23.9887638092041, "blob_id": "f4eab7c40d1c6f9f586760c3990485c85f337797", "content_id": "f3b3fa993ba0a70e67c6c41726fb404cc274204a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2225, "license_type": "permissive", "max_line_length": 127, "num_lines": 89, "path": "/readme.md", "repo_name": "murungiKirima/plog", "src_encoding": "UTF-8", "text": "# Flask Blog.\n#### By Murungi Kirima.\nThis is a blog web application made using flask.\n\n## Description.\nThis is an application that users can log in, view various blogs by authors and shere post to their blogs.\n\n## Usage.\nYou can click this link (https://flask-plog.herokuapp.com/) to launch the app in your browser.\n\n### Prerequisites.\n1. Language; You need to install python3.6\n* Run the following individual commands to install python3.6\n```\n$ sudo add-apt-repository ppa:jonathonf/python-3.6\n$ sudo apt-get update\n$ sudo apt-get install python3.6\n```\n* To confirm python3.6 installation, Run the\nfollowing command on your console:\n```\n$ python3.6\n\nPython 3.6.0 (default, Nov 17 2016, 17:05:23) \n[GCC 5.4.0 20160609] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>>\n...\n>>>exit()\n```\n2. Virtual environment.\n* Run the following individual commands on your console.\n```\n$ sudo apt-get install python3.6-venv\n```\n3. Database.\n* Run the following individual commands on your console to install postgres.\n```\n$ sudo apt-get install postgresql postgresql-contrib libpq-dev\n```\n\n* Now your ready to launch the app for development.\n\n### Source Code and development.\n1. 
Activate virtual env.\n* Create the virtual environment in your application folder, Run the following in your console\n```\n$ cd <app folder>\n$ python3.6 -m venv virtual\n```\n* Activate virtual environment by running\n```\n$ source virtual/bin/activate\n```\n\n2. Install requirements.\n* Install requirements from requirements.txt file by running\n```\n$ pip install -r requirements.txt\n```\n\n3. Setup the db.\n```\n$ sudo -u postgres createuser --superuser $USER\n```\n\n4. Create the database for the blog.\n```\n$ sudo -u postgres createdb $USER\n```\n\n5. Run app for development.\n* Run the following command on your terminal to run the app in developement\n```\n$ python manage.py server\n```\n\n## Technologies Used.\n* Python3.6 as the development language. \n* Flask as the framework.\n* Bootstrap for the styling. \n* Postgres for the DATABASE. \n* heroku for hosting the application. \n\n## Further help.\nFor additions, submit a pull request and once approved you can make contributions at will. 
Alternatively contact me at: [email protected]\n\n## License.\nMIT ยฉ2017\n" }, { "alpha_fraction": 0.7269230484962463, "alphanum_fraction": 0.7269230484962463, "avg_line_length": 25.049999237060547, "blob_id": "9428d564ec02e81c98456182612781ed51da308d", "content_id": "677c7ebc6b89747a9e05a10e40c471fff25d496d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "permissive", "max_line_length": 80, "num_lines": 20, "path": "/manage.py", "repo_name": "murungiKirima/plog", "src_encoding": "UTF-8", "text": "from app import create_app, db\nfrom flask_script import Manager, Server\nfrom app.models import User, Post, Comments\n\nfrom flask_migrate import Migrate, MigrateCommand\n# Creating app instance\napp = create_app('production')\n\nmanager = Manager(app)\nmanager.add_command('server', Server)\n\nmigrate = Migrate(app,db)\nmanager.add_command('db',MigrateCommand)\n\[email protected]\ndef make_shell_context():\n return dict(app = app,db = db,User = User, Post = Post, Comments = Comments)\n\nif __name__ == '__main__':\n manager.run()" }, { "alpha_fraction": 0.6763052344322205, "alphanum_fraction": 0.6787148714065552, "avg_line_length": 25.510639190673828, "blob_id": "3eeda6eb02f7cee2d7d2bce7b9d0e75fc0c194e0", "content_id": "40786eb769d8b4c60a9507df6b32e6b1e35b9df3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1245, "license_type": "permissive", "max_line_length": 106, "num_lines": 47, "path": "/app/main/views.py", "repo_name": "murungiKirima/plog", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom . 
import main\nfrom ..models import Post\nfrom flask_login import login_required\nfrom datetime import datetime\n\[email protected]('/')\ndef index():\n posts = Post.query.order_by(Post.date_posted.desc()).all()\n return render_template('index.html', posts=posts)\n\[email protected]('/about')\ndef about():\n return render_template('about.html')\n\[email protected]('/user/<uname>')\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n\n return render_template(\"profile/profile.html\", user = user)\n\[email protected]('/post/<int:post_id>')\ndef post(post_id):\n post = Post.query.filter_by(id=post_id).one()\n\n return render_template('post.html', post=post)\n\[email protected]('/add')\ndef add():\n return render_template('add.html')\n\[email protected]('/addPost',methods=['POST'])\ndef addPost():\n title = request.form['title']\n subtitle = request.form['subtitle']\n author = request.form['author']\n post = request.form['post']\n\n post = Post(title=title, subtitle=subtitle, author=author, blog_post=post, date_posted=datetime.now())\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(url_for('index'))" }, { "alpha_fraction": 0.651615560054779, "alphanum_fraction": 0.6603148579597473, "avg_line_length": 28.45121955871582, "blob_id": "346b1f20756450c490e37727ee9150bf1ba40cab", "content_id": "0c6e435b1f0d6d0dbefe0ade629d867d1d19f6f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2414, "license_type": "permissive", "max_line_length": 98, "num_lines": 82, "path": "/app/models.py", "repo_name": "murungiKirima/plog", "src_encoding": "UTF-8", "text": "from app import create_app,db\nfrom werkzeug.security import generate_password_hash,check_password_hash\nfrom flask_login import UserMixin\nfrom . 
import login_manager\nfrom datetime import datetime\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(UserMixin,db.Model):\n\n __tablename__='users'\n\n id = db.Column(db.Integer,primary_key = True)\n username = db.Column(db.String(255))\n email = db.Column(db.String(255),unique = True, index =True)\n password_hash = db.Column(db.String(255))\n pass_secure = db.Column(db.String(255))\n post = db.relationship(\"Post\", backref=\"user\", lazy = \"dynamic\")\n comment = db.relationship(\"Comments\", backref=\"user\", lazy = \"dynamic\")\n\n @property\n def password(self):\n raise AttributeError('You can not read the Password Attribute')\n\n\n @password.setter\n def password(self, password):\n self.pass_secure = generate_password_hash(password)\n\n\n def verify_password(self,password):\n return check_password_hash(self.password_hash, password)\n\n\n def __repr__(self):\n return 'User {}'.format(self.username)\n\nclass Post(db.Model):\n __tablename__ = 'post'\n\n id = db.Column(db.Integer, primary_key = True)\n title = db.Column(db.String(50))\n subtitle = db.Column(db.String(50))\n author = db.Column(db.String(20))\n date_posted = db.Column(db.DateTime)\n blog_post = db.Column(db.Text)\n\n user_id = db.Column(db.Integer,db.ForeignKey(\"users.id\"))\n comment = db.relationship(\"Comments\", backref=\"post\", lazy = \"dynamic\")\n\n def save_post(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def clear_post(cls):\n Post.all_post.clear()\n\n def get_pitches(id):\n post = Post.query.all()\n return post\n\nclass Comments(db.Model):\n __tablename__ = 'comments'\n\n id = db.Column(db. 
Integer, primary_key=True)\n comment_id = db.Column(db.String(255))\n date_posted = db.Column(db.DateTime, default=datetime.utcnow)\n\n user_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"))\n post_id = db.Column(db.Integer, db.ForeignKey(\"post.id\"))\n\n def save_comment(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def get_comments(self, id):\n comment = Comments.query.order_by(Comments.date_posted.desc()).filter_by(post_id=id).all()\n return comment" }, { "alpha_fraction": 0.7103559970855713, "alphanum_fraction": 0.716828465461731, "avg_line_length": 27.136363983154297, "blob_id": "d94aa1fb23b97735b8add48d6a42aee346fba230", "content_id": "df1bda856b945d47a3e02b4eaa91eada293d77cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "permissive", "max_line_length": 117, "num_lines": 22, "path": "/config.py", "repo_name": "murungiKirima/plog", "src_encoding": "UTF-8", "text": "import os\n\nclass Config:\n CSRF_INSTALLED = True\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'postgresql+psycopg2://murungi:murungi1@localhost/plog')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SECRET_KEY=\"svcdsdhgcvghsg\"\n\nclass ProdConfig(Config):\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DATABASE_URL\")\n\nclass DevConfig(Config):\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'postgresql+psycopg2://murungi:murungi1@localhost/plog')\n DEBUG = True\n\nclass TestingConfig(Config):\n TESTING = True\n\nconfig_options = {\n 'development': DevConfig,\n 'production': ProdConfig,\n }" } ]
5
CUN-bjy/WalkYTo
https://github.com/CUN-bjy/WalkYTo
0079f8c21fe314c10be3d653c21915038b70ff64
ecd1a568546a08274d9257d42eab56dbf86c15b1
d2fee25a534329677a8ff31bc0982a267aa7f252
refs/heads/master
2021-01-01T16:22:22.692781
2017-11-30T10:46:02
2017-11-30T10:46:02
97,814,758
2
2
null
2017-07-20T09:08:20
2017-07-21T04:46:32
2017-08-16T07:38:55
Python
[ { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.6100569367408752, "avg_line_length": 30, "blob_id": "a5590281fb3e46289312bc1a53af2a65d9ac4e63", "content_id": "a03a2066523db8812b0c7582127f2ee99de579e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 94, "num_lines": 34, "path": "/src/GetLinkState.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "import sys, os\nimport math\n\nimport rospy\nfrom gazebo_msgs.srv import *\n\ndef get_link_state(link_name, reference_frame):\n rospy.wait_for_service('gazebo/get_link_state')\n \n try:\n # create a handle to the add_two_ints service\n link_state = rospy.ServiceProxy('gazebo/get_link_state', GetLinkState,persistent=True)\n \n # simplified style\n resp1 = link_state(link_name, reference_frame)\n print resp1.status_message\n return resp1.link_state.twist.angular\n\n \n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\nif __name__ == \"__main__\":\n link_name1 = 'MR_bell_imu_1::MC-9' \n reference_frame1 = 'MR_bell_imu_1::C3'\n a = get_link_state(link_name1, reference_frame1)\n b = get_link_state('MR_bell_imu_1::MA-2', 'MR_bell_imu_1::A2')\n # b = get_link_state(link_name2, reference_frame2)\n print(math.sqrt(a.x**2+a.y**2+a.z**2))\n print(math.sqrt(b.x**2+b.y**2+b.z**2))\n # print(b)\n # print(a.x - b.x)\n # print(a.y - b.y)\n # print(a.z - b.z)\n" }, { "alpha_fraction": 0.5707070827484131, "alphanum_fraction": 0.6010100841522217, "avg_line_length": 29.461538314819336, "blob_id": "75d29b6bf667828e92f560911c7b00d9e1463c73", "content_id": "3a1859fad8a762a7d575928374d5aa934e6a0227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 99, "num_lines": 39, "path": "/src/link_states_listener.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", 
"text": "#! /usr/bin/env python\nimport rospy\nfrom gazebo_msgs.msg import LinkStates\nimport math\n\nlink_reference = {'ML-0':'CORE','ML-1':'L2',\n'ML-2':'L3-1','ML-3':'L3-1','ML-4':'L4','MR-5':'CORE',\n'MR-6':'R2','MR-7':'R3-1','MR-8':'R3-1','MR-9':'R4'}\n\ndef callback(data):\n # print(data.name)\n # print(data.pose[])\n model_name = 'MS_Faraday_imu'\n vel_list = [0,0,0,0,0,0,0,0,0,0]\n\n for key in list(link_reference.keys()):\n link_name = key\n reference_name = link_reference[key]\n joint_name = '%s::%s'%(model_name,link_name)\n ref_joint_name = '%s::%s'%(model_name,reference_name)\n\n twist = data.twist[data.name.index(joint_name)].angular\n ref_twist = data.twist[data.name.index(ref_joint_name)].angular\n\n vel = math.sqrt((twist.x-ref_twist.x)**2+(twist.y-ref_twist.y)**2+(twist.z-ref_twist.z)**2)\n vel_list[int(key[3])] = vel\n #for joint_name in joint_name_list:\n print(vel_list)\n\n\ndef listener():\n rospy.init_node('link_states',anonymous = True)\n # gazebo_msgs/LinkStates --> rostopic type\n rospy.Subscriber('/gazebo/link_states', LinkStates, callback)\n rospy.spin()\n\n\nif __name__ == '__main__':\n listener()\n" }, { "alpha_fraction": 0.5568448901176453, "alphanum_fraction": 0.5756625533103943, "avg_line_length": 25.575000762939453, "blob_id": "7dbd00e453d140be5867ad2d44219b8f62a38ec1", "content_id": "aa41d0a1d561c007eef453efc43ba2649ca0e1a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6377, "license_type": "no_license", "max_line_length": 105, "num_lines": 240, "path": "/src/analysis_tools/analysis.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "import neat, visualize\nimport pickle,sys, os, time, signal\nimport rospy\n\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Empty\nfrom walkyto.srv import *\n\nfrom termcolor import cprint\n\ndef manual():\n\tprint(\"\\n\"),\n\tprint(\"=============================\")\n\tprint(\" Analysis tool for 
WalkYTo\")\n\tprint(\"1. network visualizer\")\n\tprint(\"2. motion tester\")\n\tprint(\"3. I/O viz\")\n\tprint(\"=============================\")\n\treturn input(\"select a work you want>\")\n\n\ndef extract_gene():\n\tfilename = input(\"Drag the stats file and press the enter\\n(to quit, enter 'q' including quotation)\\n:\")\n\tif filename ==\"q\":\n\t\treturn False,False\n\n\tstat_file = open(filename)\n\tstats = pickle.load(stat_file)\n\n\n\n\tlocal_dir = os.path.dirname(filename)\n\tconfig_file = os.path.join(local_dir, 'config-feedforward')\n\t# Load configuration\n\tconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n\t\t\t\t\t\tneat.DefaultSpeciesSet, neat.DefaultStagnation,\n\t\t\t\t\t\tconfig_file)\n\n\n\n\tmost_gene = stats.best_unique_genomes(5)\n\treturn most_gene, config\n#================================================================================================\ndef network_visualizer():\n\twhile True:\n\t\tprint(\"\\n\"),\n\t\tmost_gene, config = extract_gene()\n\t\tif most_gene == False:\n\t\t\treturn\n\n\t\tnode_names = {-1:'ML-0(I)', -2:'ML-1(I)', -3:'ML-2(I)', -4:'ML-3(I)',-5:'ML-4(I)',\n\t\t\t\t\t\t-6:'MR-5(I)',-7:'MR-6(I)',-8:'MR-7(I)',-9:'MR-8(I)',-10:'MR-9(I)',\n\t\t\t\t\t\t0:'ML-0(O)', 1:'ML-1(O)', 2:'ML-2(O)', 3:'ML-3(O)',4:'ML-4(O)',5:'MR-5(O)',\n\t\t\t\t\t\t6:'MR-6(O)',7:'MR-7(O)', 8:'MR-8(O)',9:'MR-9(O)'}\n\n\t\tfor i in range(5):\n\t\t\tif i != 4:\n\t\t\t\tview = False\n\t\t\telse:\n\t\t\t\tview = True\n\t\t\tvisualize.draw_net(config, most_gene[i], view=view, node_names=node_names,filename=\"%d\"%i)\n\n\t\t\tprint (\"%d:\"%i, most_gene[i].fitness)\n#===================================================================================================\ndef gene_id_publisher(gene_string):\n\tpub = rospy.Publisher('gene_pub', String, queue_size=10)\n\tpub.publish(gene_string)\n\ndef fit_caller(channel):\n\trospy.wait_for_service('sim_run%d'%channel)\n\ttry:\n\t\tSim_Run = rospy.ServiceProxy('sim_run%d' % channel, 
SimRun, persistent=True)\n\n\t\tresp = Sim_Run.call(SimRunRequest(True))\n\n\t\tif resp.success:\n\t\t\treturn resp.distance\n\t\telse:\n\t\t\treturn -1\n\n\texcept rospy.ServiceException, e:\n\t\tprint \"Service call failed: %s\"%e\n\ndef gazebo_clear():\n\trospy.wait_for_service('gazebo/reset_simulation')\n\ttry:\n\t\trs_sim = rospy.ServiceProxy('gazebo/reset_simulation', Empty)\n\n\t\tresp = rs_sim.call()\n\n\texcept rospy.ServiceException, e:\n\t\tprint \"Service call failed: %s\"%e\n\ndef motion_tester():\n\tcore_pid = os.fork()\n\tif core_pid == 0:\n\t\tc_log = os.open(\"core_log\", os.O_RDWR|os.O_CREAT|os.O_TRUNC)\n\t\tos.close(sys.__stdout__.fileno());os.dup(c_log)\n\t\tos.close(sys.__stderr__.fileno());os.dup(c_log)\n\n\t\tos.execlp(\"roscore\", 'roscore')\n\t\tsys.exit()\n\n\tsimulator_pid = os.fork()\n\tif simulator_pid == 0:\n\t\ttime.sleep(3)\n\t\ts_log = os.open(\"simulator_log\", os.O_RDWR|os.O_CREAT|os.O_TRUNC)\n\t\tos.close(sys.__stdout__.fileno());os.dup(s_log)\n\t\tos.close(sys.__stderr__.fileno());os.dup(s_log)\n\n\t\tos.execvp(\"roslaunch\", ('roslaunch','walkyto','motion_test.launch'))\n\t\tsys.exit()\n\n\tcprint(\"boot on the gazebo & simulators(12s)\", 'blue', 'on_white')\n\tfor i in range(12):\n\t\ttime.sleep(1)\n\t\tos.write(sys.__stderr__.fileno(),\"###\")\n\t\tif i == 9:\n\t\t\tclient_pid = os.fork()\n\t\t\tif client_pid == 0:\n\t\t\t\tc_log = os.open(\"client_log\", os.O_RDWR|os.O_CREAT|os.O_TRUNC)\n\t\t\t\tos.close(sys.__stdout__.fileno());os.close(sys.__stderr__.fileno())\n\t\t\t\tstdout = os.dup(c_log);stderr = os.dup(c_log)\n\n\t\t\t\tos.execvp(\"roslaunch\", ('roslaunch', 'walkyto', 'gzclient.launch'))\n\t\t\t\tsys.exit()\n\tprint(\"\\n\"),\n\n\n\trospy.init_node('analyst')\n\twhile True:\n\t\tmost_gene, config = extract_gene()\n\t\tif most_gene == False:\n\t\t\tbreak\n\n\t\twhile True:\n\t\t\tprint(\"which genome?\")\n\n\t\t\tfor i in range(5):\n\t\t\t\tprint(\"%d) gene_%d(%f)\"%(i,i, most_gene[i].fitness))\n\t\t\tgene_num = 
input(\"5) all\\n6) quit\\n\\n:\")\n\n\n\t\t\tif gene_num == 5:\n\t\t\t\tfor i in range(5):\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\t\tgene_id_publisher('-')\n\t\t\t\tgazebo_clear()\n\t\t\t\tfor i in range(5):\n\t\t\t\t\tgen_file = open(\"%s/src/genes/%d\" % (os.getenv('WALKYTO_PATH'),i),'w')\n\t\t\t\t\tpickle.dump(most_gene[i], gen_file)\n\t\t\t\t\tgen_file.close()\n\n\t\t\t\tgene_string = '0/1/2/3/4'\n\n\t\t\t\tprint(\"gene_id:\", gene_string)\n\t\t\t\tgene_id_publisher(gene_string)\n\n\t\t\t\tfcnt = 0; fit_list=[]\n\t\t\t\twhile fcnt < 5:\n\t\t\t\t\tgene_id_publisher('-%d'%fcnt)\n\t\t\t\t\tfitness = fit_caller(fcnt+1)\n\t\t\t\t\tif fitness != -1:\n\t\t\t\t\t\tfit_list.append(fitness)\n\t\t\t\t\t\tfcnt = fcnt + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tnow = rospy.get_rostime()\n\t\t\t\t\t\tif now.secs%10 == 0 :\n\t\t\t\t\t\t\trospy.loginfo(\"Current time %i %.3i\", now.secs, now.nsecs)\n\t\t\t\t\t\ttime.sleep(0.5)\n\n\t\t\t\tprint(\"fit:\", fit_list)\n\n\t\t\telif gene_num==0 or gene_num==1 or gene_num==2 or gene_num==3 or gene_num==4:\n\t\t\t\tfor i in range(5):\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\t\tgene_id_publisher('-')\n\t\t\t\tgazebo_clear()\n\t\t\t\tgen_file = open(\"%s/src/genes/%d\" % (os.getenv('WALKYTO_PATH'),gene_num),'w')\n\t\t\t\tpickle.dump(most_gene[gene_num], gen_file)\n\t\t\t\tgen_file.close()\n\n\t\t\t\tgene_string = '%s/-1/-1/-1/-1'%str(gene_num)\n\t\t\t\tprint(\"gene_id:\", str(gene_num))\n\t\t\t\tgene_id_publisher(gene_string)\n\n\t\t\t\tfcnt = 0; fit_list=[]\n\t\t\t\twhile fcnt == 0:\n\t\t\t\t\tgene_id_publisher('-%d'%fcnt)\n\t\t\t\t\tfitness = fit_caller(fcnt+1)\n\t\t\t\t\tif fitness != -1:\n\t\t\t\t\t\tfit_list.append(fitness)\n\t\t\t\t\t\tfcnt = fcnt + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tnow = rospy.get_rostime()\n\t\t\t\t\t\tif now.secs%10 == 0 :\n\t\t\t\t\t\t\trospy.loginfo(\"Current time %i %.3i\", now.secs, now.nsecs)\n\t\t\t\t\t\ttime.sleep(0.5)\n\n\t\t\t\tprint(\"fit:\", fit_list)\n\t\t\telse:\n\t\t\t\tbreak\n\n\tos.kill(simulator_pid, 
signal.SIGINT); os.kill(client_pid, signal.SIGINT)\n\tcprint(\"boot down the gazebo & simulators(5s)\", 'blue', 'on_white')\n\tfor i in range(5):\n\t\ttime.sleep(1)\n\t\tos.write(sys.__stderr__.fileno(),\"#######\")\n\tprint(\"##\\n\"),\n\n\n#==========================================================================================\nif __name__ == '__main__':\n\tcur_child_cnt = 0\n\n\twhile True:\n\t\tselected = manual()\n\n\t\tif selected == 1:\n\t\t\tpid = os.fork()\n\t\t\tif pid == 0:\n\t\t\t\tnetwork_visualizer()\n\t\t\t\tsys.exit()\n\t\t\tcur_child_cnt += 1\n\n\t\telif selected == 2:\n\t\t\tpid = os.fork()\n\t\t\tif pid == 0:\n\t\t\t\tmotion_tester()\n\t\t\t\tsys.exit()\n\t\t\tcur_child_cnt += 1\n\t\telif selected == 3:\n\t\t\tprint(\"\\nit is not completed function\")\n\t\telse:\n\t\t\tprint(\"\\n!!wrong typed!! I'm gonna exit\")\n\t\t\tsys.exit()\n\n\t\tif cur_child_cnt > 0:\n\t\t\tos.wait()\n\t\t\tcur_child_cnt-=1" }, { "alpha_fraction": 0.7430760264396667, "alphanum_fraction": 0.7436652779579163, "avg_line_length": 43.657894134521484, "blob_id": "68433c717c4d8a21f480bf0d50d39074c717ea83", "content_id": "f947f534d885c54d5677d945384e1333dd7527fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1697, "license_type": "no_license", "max_line_length": 203, "num_lines": 38, "path": "/README.md", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "# WalkYTo\n### :Walk Yourself, Toddler!\n\nHanyang University, Robotics, CPS & AI LAB\n\nUndergraduate Researcher Program\n>Junyeob Beak([email protected]), Hyeonwoo Park([email protected]), Seunghwan Yu([email protected])\n\n\n#### Nessesary for user to launch this project.\n- \"gazebo_ros_pkgs\" for graphic simulation -- ros package\n- \"neat-python\" for genetic network model -- python module\n- \"tensorflow\" for DQN implementation -- python module\n\n---\n### Gazebo Simulation 
System\n![Image](https://github.com/CUN-bjy/WalkYTo/blob/master/system.jpg?raw=true)\n*with bell_and_faraday launch file*\n\n\n\n### Commend for launch\n<pre>roslaunch walkyto bell_and_faraday_world.launch</pre>\n\nif you want to run gazebo simulator, with the argment `GUI:=true`\n<pre>roslaunch walkyto bell_and_faraday_world.launch GUI:=true</pre>\n\n### Package Abstract\n- `/launch` : has launch files to launch processes with certain parameters.\n- `/models` : has our robot descriptions(bell & faraday).\n- `/src` : has main sources.\n\t* `network_gen.py` : networks generator code. This process generate genes and save in the `/genes` directory. Then choose random 4 genes and give the genes to the simulator until all population is done.\n\t* `simulator.py` : When receive the genes from generator, simulate the network. During the simulating, this process communicate with gazebo. \n\t* `/genes` : gene files generated from network generator.\n\t* `neat-checkpoint-*` : save generation files(pickle)\n\t* Other files are for supporting above files. Please focus on the every first lines with 'import' code.\n- `/srv` : has a service file for generator to communicate with simulators.\n- `/worlds` : has world files. It includes informations to describe physics and simulation env. 
configuration.\n" }, { "alpha_fraction": 0.6213791966438293, "alphanum_fraction": 0.6367034316062927, "avg_line_length": 25.760000228881836, "blob_id": "a5c03e06839409639f4e0e292cfcaf9bc055666f", "content_id": "53b81fe35e36683c3c943e69110b65435b053b62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5351, "license_type": "no_license", "max_line_length": 96, "num_lines": 200, "path": "/src/network_gen.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os, pickle\nimport rospy\nimport neat, visualize\nimport time\n\nfrom walkyto.srv import *\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Empty\n\n\ndef gazebo_clear():\n\trospy.wait_for_service('gazebo/reset_simulation')\n\ttry:\n\t\trs_sim = rospy.ServiceProxy('gazebo/reset_simulation', Empty)\n\n\t\tresp = rs_sim.call()\n\n\texcept rospy.ServiceException, e:\n\t\tprint \"Service call failed: %s\"%e\n\ndef world_clear():\n\trospy.wait_for_service('gazebo/reset_world')\n\ttry:\n\t\trs_sim = rospy.ServiceProxy('gazebo/reset_world', Empty)\n\n\t\tresp = rs_sim.call()\n\n\t\tprint(\"world reset!\")\n\n\texcept rospy.ServiceException, e:\n\t\tprint \"Service call failed: %s\"%e\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef gene_clear():\n\tglobal local_dir\n\n\tgenes_list = os.listdir(\"%s/genes\"%local_dir)\n\tfor rm_file in genes_list:\n\t\tos.remove(\"%s/genes/%s\" % (local_dir,rm_file))\n\ndef gene_management(genomes):\n\tglobal local_dir\n\n\tgenome_list = []\n\tgenes_list = os.listdir(\"%s/genes\"%local_dir)\n\tfor genome_id, genome in genomes:\n\t\tgenome_list.append(str(genome.key))\n\n\ts1 = set(genes_list); s2 = set(genome_list)\n\tadd_list = [x for x in genome_list if x not in s1]\n\trm_list = [x for x in genes_list if x not in s2]\n\n\tfor rm_file in rm_list:\n\t\tos.remove(\"%s/genes/%s\" % 
(local_dir,rm_file))\n\n\tfor genome_id, genome in genomes:\n\t\tgen_file = open(\"%s/genes/%d\" % (local_dir,genome_id),'w')\n\t\tpickle.dump(genome, gen_file)\n\ndef gene_id_publisher(gene_string):\n\tpub = rospy.Publisher('gene_pub', String, queue_size=10)\n\tpub.publish(gene_string)\n\ndef fit_caller(channel):\n\trospy.wait_for_service('sim_run%d'%channel)\n\ttry:\n\t\tSim_Run = rospy.ServiceProxy('sim_run%d' % channel, SimRun, persistent=True)\n\n\t\tresp = Sim_Run.call(SimRunRequest(True))\n\n\t\tif resp.success:\n\t\t\treturn resp.distance\n\t\telse:\n\t\t\treturn -1\n\n\texcept rospy.ServiceException, e:\n\t\tprint \"Service call failed: %s\"%e\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef eval_genomes(genomes, config):\n\tgene_management(genomes)\n\tid_list = []; gene_list = []\n\tfor gene_id, gene in genomes:\n\t\tid_list.append(gene_id)\n\t\tgene_list.append(gene)\n\n\tdup_num = 8\n\tpopulation = len(id_list)\n\ttemp_list = list(id_list)\n\n\n\twhile(temp_list != []):\n\t\tgene_string = str(temp_list.pop())\n\n\t\tfor i in range(1,dup_num):\n\t\t\tif len(temp_list) > 0:\n\t\t\t\tgene_string = gene_string + '/' + str(temp_list.pop())\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tprint(\"-----------simulation(%d/%d)--------------\"%(population-len(temp_list), population))\n\t\tprint(\"gene_id:\", gene_string)\n\t\tgene_id_publisher(gene_string)\n\t\t\n\n\t\tfcnt = 0; fit_list=[]\n\t\twhile fcnt < dup_num:\n\t\t\tgene_id_publisher('-%d'%fcnt)\n\t\t\tfitness = fit_caller(fcnt+1)\n\t\t\tif fitness != -1:\n\t\t\t\tfit_list.append(fitness)\n\t\t\t\ta_gen = gene_list.pop()\n\t\t\t\ta_gen.fitness = fitness\n\t\t\t\tfcnt = fcnt + 1\n\t\t\telse:\n\t\t\t\ttime.sleep(0.2)\n\n\t\t\tif len(gene_list) == 0:\n\t\t\t\tbreak\n\n\t\t# world_clear()\n\t\tgazebo_clear()\n\t\tprint(\"fit:\", fit_list)\n\n\n\tglobal local_dir, stats, 
p\n\tif p.generation % 10 ==9:\n\t\tstat_file = open(\"%s/stats%d\" % (local_dir, p.generation),'w')\n\t\tpickle.dump(stats, stat_file)\n\t# gazebo_clear()\n\t# print 'pass'\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef run(config_file, max_iter):\n\tglobal stats, p\n # Load configuration\n\tconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n\t\t\t\t\t\tneat.DefaultSpeciesSet, neat.DefaultStagnation,\n\t\t\t\t\t\tconfig_file)\n\n\t# Create the population, which is the top-level object for a NEAT run.\n\tp = neat.Population(config)\n\n\t# Add a stdout reporter to show progress in the terminal.\n\tp.add_reporter(neat.StdOutReporter(True))\n\n\tstats = neat.StatisticsReporter()\n\tp.add_reporter(stats)\n\tp.add_reporter(neat.Checkpointer(generation_interval=10,time_interval_seconds=None))\n\n\t# Run for up to 300 generations.\n\twinner = p.run(eval_genomes, n=int(max_iter))\n\n\n\tnode_names = {-1:'ML-0(I)', -2:'ML-1(I)', -3:'ML-2(I)', -4:'ML-3(I)',-5:'ML-4(I)',\n\t\t\t\t-6:'MR-5(I)',-7:'MR-6(I)',-8:'MR-7(I)',-9:'MR-8(I)',-10:'MR-9(I)',\n\t\t\t\t0:'ML-0(O)', 1:'ML-1(O)', 2:'ML-2(O)', 3:'ML-3(O)',4:'ML-4(O)',5:'MR-5(O)',\n\t\t\t\t6:'MR-6(O)',7:'MR-7(O)', 8:'MR-8(O)',9:'MR-9(O)'}\n\tvisualize.draw_net(config, winner, view=True, node_names=node_names)\n\tvisualize.plot_stats(stats, ylog=False, view=True)\n\tvisualize.plot_species(stats, view=True)\n\n\ndef load_checkpoint(ckp_name, max_iter):\n\tglobal stats,p\n\tp = neat.Checkpointer.restore_checkpoint(ckp_name)\n\tp.add_reporter(neat.StdOutReporter(True))\n\n\tstats = neat.StatisticsReporter()\n\tp.add_reporter(stats)\n\tp.add_reporter(neat.Checkpointer(generation_interval=10,time_interval_seconds=None))\n\n\n\tp.run(eval_genomes, max_iter)\n\nif __name__ == '__main__':\n # Determine path to configuration file. 
This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n\t# rospy.init_node('network_gen', anonymous=True)\t\n\trospy.init_node('network_gen')\n\targv = rospy.myargv()\n\t\n\tfor i in range(20):\n\t\tgene_id_publisher('-');time.sleep(0.25)\n\n\tlocal_dir = os.path.dirname(__file__)\n\tconfig_path = os.path.join(local_dir, 'config-feedforward')\n\t\n\n\tif not os.path.exists(\"%s/genes\"%local_dir):\n\t\tos.makedirs(\"%s/genes\"%local_dir, 0766)\n\telse:\n\t\tgene_clear()\n\n\t\t\n\tif argv[2] == '-r':\n\t\trun(config_path, argv[1])\n\telif argv[2] == '-l':\n\t\tload_checkpoint(argv[3],argv[1])" }, { "alpha_fraction": 0.6287128925323486, "alphanum_fraction": 0.6369637250900269, "avg_line_length": 25.34782600402832, "blob_id": "1952ac101c2972f43c0a1b6e5e4e9fa119b275af", "content_id": "98171dca0f01c2cb6967b4d310ed1329a0225501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/src/delete_model_client.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "import sys\nimport os\n\nimport rospy\nfrom gazebo_msgs.srv import *\n\ndef delete_model(model_name):\n rospy.wait_for_service('gazebo/delete_model')\n \n try:\n # create a handle to the add_two_ints service\n delete_model = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n \n # simplified style\n resp1 = delete_model(model_name)\n # print(resp1.success)\n print(resp1.status_message)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\n# if __name__ == \"__main__\":\n# \tmodel_name1 = 'MR_bell_imu'\n# \tdelete_model(model_name1)\n" }, { "alpha_fraction": 0.633234977722168, "alphanum_fraction": 0.650934100151062, "avg_line_length": 28.91176414489746, "blob_id": "28f241094db9cb89bdf6ee210abdbddc47f993fa", "content_id": "9d01bc90a795dd7b62cae70c124022e068f6dd3d", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 104, "num_lines": 34, "path": "/src/apply_joint_effort_client.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "import sys\nimport os\n\nimport rospy\nfrom gazebo_msgs.srv import *\n\nclass time:\n\tdef __init__(self, secs, nsecs):\n\t\tself.secs = secs\n\t\tself.nsecs = nsecs\n\ndef apply_joint_effort(joint_name, effort, start_time, duration ):\n rospy.wait_for_service('gazebo/apply_joint_effort')\n \n try:\n # create a handle to the add_two_ints service\n apply_joint = rospy.ServiceProxy('gazebo/apply_joint_effort', ApplyJointEffort, persistent=True)\n \n # simplified style\n resp1 = apply_joint(joint_name, effort, start_time, duration)\n # print(resp1.success)\n # print(resp1.status_message)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\n# if __name__ == \"__main__\":\n# \tjoint_name1 = 'MS_Faraday_imu::1'\n# \tjoint_name2 = 'MS_Faraday_HW_0::1'\n# \teffort = 4000\n# \tstart_time = time(0, 0)\n# \tduration = time(1, 0)\n\n# \tapply_joint_effort(joint_name1, effort, start_time, duration)\n# \t# apply_joint_effort(joint_name2, effort, start_time, duration)\n" }, { "alpha_fraction": 0.5363572239875793, "alphanum_fraction": 0.5651252865791321, "avg_line_length": 39.478572845458984, "blob_id": "937dab72070ba157ebcfffd1c4c25a93e8339e44", "content_id": "84d4c0fc768933b4929a467a15626c385ca72eb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5666, "license_type": "no_license", "max_line_length": 144, "num_lines": 140, "path": "/src/quaternion.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "import rospy, math, numpy as np\nfrom gazebo_msgs.msg import LinkStates\n\nlink_reference = 
{'ML-0':'CORE','ML-1':'L2',\n'ML-2':'L3-1','ML-3':'L3-1','ML-4':'L4','MR-5':'CORE',\n'MR-6':'R2','MR-7':'R3-1','MR-8':'R3-1','MR-9':'R4'}\n\ndef dotted_func(self, data):\n model_name = '%s_%d' % (self.model_name, self.dup_num)\n position_list = [0,0,0,0,0,0,0,0,0,0]\n link_position = []; M_position = []; rel_pos = []\n\n for key in ['L1','L5','R1','R5']:\n joint_name = '%s::%s'%(model_name,key)\n if joint_name in data.name:\n link_position.append(data.pose[data.name.index(joint_name)].position)\n\n for M_name in ['ML-1','ML-4','MR-6','MR-9']:\n joint_name = '%s::%s'%(model_name,M_name)\n if joint_name in data.name:\n M_position.append(data.pose[data.name.index(joint_name)].position)\n\n if len(link_position)==4 and len(M_position)==4:\n for i in range(4):\n rel_pos.append([link_position[i].x - M_position[i].x,link_position[i].y - M_position[i].y,link_position[i].z - M_position[i].z])\n else:\n # print len(link_position),len(M_position)\n return [0,0,0,0,0,0,0,0,0,0]\n\n for key in list(self.link_reference.keys()):\n link_name = key\n reference_name = self.link_reference[key]\n joint_name = '%s::%s'%(model_name,link_name)\n ref_joint_name = '%s::%s'%(model_name,reference_name)\n\n # orientation = data.pose[data.name.index(joint_name)].orientation\n # ref_orientation = data.pose[data.name.index(ref_joint_name)].orientation\n if joint_name in data.name:\n position = data.pose[data.name.index(joint_name)].position\n ref_position = data.pose[data.name.index(ref_joint_name)].position\n relative_pos = np.array([position.x - ref_position.x,position.y - ref_position.y,position.z - ref_position.z])\n # print(np.linalg.norm(relative_pos))\n # print(relative_pos)\n\n if key[3] == '0':\n core_vec_L = relative_pos\n\n L3_vec = data.pose[data.name.index('%s::L3-1'%(model_name))].position\n ML0_vec = [L3_vec.x - position.x,L3_vec.y - position.y,L3_vec.z - position.z]\n position_list[int(key[3])] = np.array(ML0_vec)\n\n elif key[3] == '5':\n core_vec_R = relative_pos\n\n R3_vec = 
data.pose[data.name.index('%s::R3-1'%(model_name))].position\n ML5_vec = [R3_vec.x - position.x,R3_vec.y - position.y,R3_vec.z - position.z]\n position_list[int(key[3])] = np.array(ML5_vec)\n\n else:\n position_list[int(key[3])] = relative_pos \n\n\n position_list.insert(10,rel_pos[3]);position_list.insert(6,rel_pos[2]);position_list.insert(5,rel_pos[1]);position_list.insert(1,rel_pos[0])\n # for i in range(14):\n # print(np.linalg.norm(position_list[i]))\n cross_vec = [ np.cross(core_vec_L,position_list[0]), \n np.cross(position_list[1],position_list[2]),\n np.cross(position_list[2],position_list[3]),\n np.cross(position_list[4],position_list[5]),\n np.cross(position_list[5],position_list[6]),\n \n np.cross(core_vec_R,position_list[7]),\n np.cross(position_list[9],position_list[8]),\n np.cross(position_list[10],position_list[9]),\n np.cross(position_list[12],position_list[11]),\n np.cross(position_list[13],position_list[12])]\n # print(cross_vec)\n # print(position_list)\n dotted =[]\n for i in range (10):\n if i ==0:\n val = np.dot(position_list[3], cross_vec[i])\n elif i ==5:\n val = np.dot(position_list[4], cross_vec[i])\n elif i < 5:\n val = np.dot(core_vec_L,cross_vec[i])\n else:\n val = np.dot(core_vec_R,cross_vec[i])\n \n if val==0:\n dotted.append(0)\n else:\n dotted.append(val/abs(val))\n \n return dotted\n\n\ndef call_quat(data):\n model_name = 'MS_Faraday_imu'\n rel_q_list = [0,0,0,0,0,0,0,0,0,0]\n print(\"======================================\")\n for key in list(link_reference.keys()):\n link_name = key\n reference_name = link_reference[key]\n joint_name = '%s::%s'%(model_name,link_name)\n ref_joint_name = '%s::%s'%(model_name,reference_name)\n\n orientation = data.pose[data.name.index(joint_name)].orientation\n ref_orientation = data.pose[data.name.index(ref_joint_name)].orientation\n\n q2_vec = np.array([orientation.x,orientation.y,orientation.z])\n q2_0 = orientation.w\n\n q1_vec = 
np.array([ref_orientation.x,ref_orientation.y,ref_orientation.z])\n q1_0 = ref_orientation.w\n\n rel_q_w = q1_0*q2_0 + np.dot(q1_vec, q2_vec)\n rel_q_vec = - q1_0*q2_vec + q2_0*q1_vec + np.cross(q2_vec,q1_vec)\n # ======================================================== #\n rel_q_list[int(key[3])] = rel_q_w\n\n dotted = dotted_func(data)\n for i in range(10):\n print(i, \"ang :\", math.acos(rel_q_list[i])*2*180/3.141592*dotted[i])\n\n\ndef listener():\n\n # In ROS, nodes are uniquely named. If two nodes with the same\n # name are launched, the previous one is kicked off. The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'listener' node so that multiple listeners can\n # run simultaneously.\n rospy.init_node('position listener', anonymous=True)\n rospy.Subscriber('/gazebo/link_states', LinkStates, call_quat)\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n listener()" }, { "alpha_fraction": 0.5532240271568298, "alphanum_fraction": 0.5690661668777466, "avg_line_length": 30.700441360473633, "blob_id": "99af1a5ab731e0ee81d65a822dc26714762c786b", "content_id": "b41b4d7f4a57f83f1771579053a4ac9f5daf6e06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7196, "license_type": "no_license", "max_line_length": 112, "num_lines": 227, "path": "/src/simulator.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport neat, visualize\nimport os, pickle, sys, numpy as np\nimport rospy\nimport math, random\n\nfrom quaternion import dotted_func\nfrom gazebo_msgs.srv import *\nfrom gazebo_msgs.msg import *\nfrom walkyto.srv import *\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Empty\n\nfrom apply_joint_effort_client import *\nfrom spawn_model_client import *\nfrom delete_model_client import *\n\nclass Twist :\n\tdef __init__(self, linear, angular):\n\t\tself.linear = 
linear\n\t\tself.angular = angular\n\nclass Vector3 :\n\tdef __init__(self, x, y, z):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\nclass simulator:\n\n\tdef gazebo_init(self):\t\t\n\t\tlocal_dir = os.getenv(\"GAZEBO_MODEL_PATH\")\n\t\tsdf_file = open('%s/%s/model.sdf'%(local_dir, self.model_name), 'r')\n\t\tmodel_xml = sdf_file.read()\n\t\tmodel_name = '%s_%d' % (self.model_name, self.dup_num)\n\t\trobot_namespace = model_name\n\n\t\tpos = position(0, (self.dup_num-1)*1.8 - (self.total_dup-1)*0.9, 0)\n\t\torient = orientation(0, 0, 0, 0)\n\t\tinitial_pose = pose(pos, orient)\n\t\treference_frame=''\n\n\t\tspawn_model(model_name, model_xml, robot_namespace, initial_pose, reference_frame)\n\n\tdef gazebo_exit(self):\n\t\tdelete_model('%s_%d' % (self.model_name, self.dup_num))\n\t##############################################################################################################\n\n\tdef state_getter(self, data):\n\t\tmodel_name = '%s_%d' % (self.model_name, self.dup_num)\n\t\tvel_list = [0,0,0,0,0,0,0,0,0,0]; q_list = [0,0,0,0,0,0,0,0,0,0]\n\n\t\tfor key in list(self.link_reference.keys()):\n\t\t\tlink_name = key\n\t\t\treference_name = self.link_reference[key]\n\t\t\tjoint_name = '%s::%s'%(model_name,link_name)\n\t\t\tref_joint_name = '%s::%s'%(model_name,reference_name)\n\n\t\t\tif joint_name in data.name:\n\t\t\t\ttwist = data.twist[data.name.index(joint_name)].angular\n\t\t\t\tref_twist = data.twist[data.name.index(ref_joint_name)].angular\n\n\t\t\t\tvel = math.sqrt((twist.x-ref_twist.x)**2+(twist.y-ref_twist.y)**2+(twist.z-ref_twist.z)**2)\n\t\t\t\tvel_list[int(key[3])] = vel\n\t\t\t\t#=========================================================================================================\n\t\t\t\torientation = data.pose[data.name.index(joint_name)].orientation\n\t\t\t\tref_orientation = data.pose[data.name.index(ref_joint_name)].orientation\n\n\t\t\t\tq2_vec = np.array([orientation.x,orientation.y,orientation.z]); q2_0 = 
orientation.w\n\t\t\t\tq1_vec = np.array([ref_orientation.x,ref_orientation.y,ref_orientation.z]); q1_0 = ref_orientation.w\n\n\t\t\t\trel_q_w = q1_0*q2_0 + np.dot(q1_vec, q2_vec)\n\t\t\t\t# rel_q_vec = - q1_0*q2_vec + q2_0*q1_vec + np.cross(q2_vec, q1_vec)\n\t\t\t\tq_list[int(key[3])] = math.acos(rel_q_w)*2\n\n\t\tdotted = dotted_func(self, data)\n\t\tfor i in range(10):\n\t\t\tq_list[i] *= dotted[i]\n\n\t\tself.joint_states = list(vel_list)+list(q_list)\n\t\t\n\n\n\tdef efforts_caller(self, joint_efforts, duration):\n\t\tfor i in range(len(joint_efforts)):\n\t\t\tapply_joint_effort('%s_%d::%d'%(self.model_name,self.dup_num,i), joint_efforts[i], time(0,0), duration)\n\n\t###############################################################################################################\n\tdef get_pose(self):\n\t\trospy.wait_for_service('gazebo/get_link_state')\n\t\ttry:\n\t\t\t# create a handle to the add_two_ints service\n\t\t\tget_model_state = rospy.ServiceProxy('gazebo/get_link_state', GetLinkState)\n\n\t\t\tmodel_name = '%s_%d' % (self.model_name, self.dup_num)\n\t\t\t# formal style\n\t\t\tresp = get_model_state.call(GetLinkStateRequest('%s::CORE'%model_name, ''))\n\n\t\t\tpos = resp.link_state.pose.position\n\n\t\t\treturn pos\n\n\t\texcept rospy.ServiceException, e:\n\t\t\tprint \"Service call failed: %s\"%e\n\t##############################################################################################################\n\tdef string_decoder(self, g_str):\n\t\tg_str = str(g_str.data).split('/')\n\t\t# for g in g_str:\n\t\t# \tif g == '-1':\n\t\t# \t\tcontinue\n\t\t# \tos.write(sys.__stderr__.fileno(), \"[%d:%s]\"%(self.dup_num,g))\n\t\tif len(g_str) > (self.dup_num-1):\n\t\t\treturn g_str[self.dup_num-1]\n\t\telse:\n\t\t\treturn -1\n\n\tdef call_simulate(self, data):\n\t\tif data.data[0] == '-':\n\t\t\treturn\n\n\t\tlocal_dir = os.path.dirname(__file__)\n\t\tconfig_file = os.path.join(local_dir, 'config-feedforward')\n\n\t\tconfig = 
neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n\t\t\t\t\t\t\tneat.DefaultSpeciesSet, neat.DefaultStagnation,\n\t\t\t\t\t\t\tconfig_file)\n\n\t\tgene_dir = os.getenv(\"WALKYTO_PATH\")\n\t\tgene_id = self.string_decoder(data)\n\t\tif gene_id == -1 or gene_id == '-1':\n\t\t\treturn\n\t\t# os.write(sys.__stderr__.fileno(), \"%s\\n\"%gene_id)\n\n\t\tgene_f = open('%s/src/genes/%s' % (gene_dir, gene_id),'rb')\n\t\tgenome = pickle.load(gene_f)\n\t\tgene_f.close()\n\n\t\tnet = neat.nn.FeedForwardNetwork.create(genome, config)\n\t\t#nn.recurrent.RecurrentNetwork ************** param3\n\t\t#-------------------------------------------------------------------------------------------------\n\t\tnow = rospy.Time.now()\n\n\t\tduration = rospy.Duration(180)\n\t\tthen = now + duration\t\t\n\n\t\t#self.gazebo_init()\n\t\tpos_init = self.get_pose()\n\t\t#--------------------------------------------------------------------------------------------------\n\t\tdur = rospy.Duration(0.3); gap = rospy.Duration(0)\n\t\tMAX_TORQUE = 3.0\n\t\twhile(then > now):\n\t\t \tjoint_efforts = net.activate(self.joint_states)\n\t\t \tfor i in range(10):\n\t\t \t\tif joint_efforts[i] > MAX_TORQUE:\n\t\t \t\t\tjoint_efforts[i] = MAX_TORQUE\n\n\t\t \tself.efforts_caller(joint_efforts, dur)#0.05sec\n\t\t \trospy.sleep(dur)\n\t\t \t# print \"input:\", self.joint_states\n\t\t \t# print \"output:\", joint_efforts\n\t\t \tnow = rospy.Time.now()\n\t\t \t\n\t\t\tif gap == rospy.Duration(0):\n\t\t\t\tif then-now > duration + rospy.Duration(5):\n\t\t\t\t\tgap = then-now\n\t\t\telif gap > rospy.Duration(0):\n\t\t\t\tnow = now + gap - duration\n\n\n\t\t \t# print self.dup_num, gap.to_sec(), then.to_sec(), now.to_sec()\n\t\t \t\n\t\t#--------------------------------------------------------------------------------------------------\t\n\t\tpos_end = self.get_pose()\n\n\t\tdist = (pos_end.x - pos_init.x)\n\n\t\tself.fitness = 
dist\n###############################################################################\n\t\n\tdef fit_server(self, req):\n\t\tif self.fitness == None:\n\t\t\treturn SimRunResponse(self.fitness, False)\n\t\telse:\n\t\t\tfitness = self.fitness\n\t\t\tself.fitness = None\n\t\t\treturn SimRunResponse(fitness, True)\n\n\tdef __init__(self, model_name, dup_t):\n\t\tself.total_dup = int(dup_t)\n\t\t\n\t\tself.model_name = model_name[:-len(model_name.split('_')[-1])-1]\n\t\tself.dup_num = int(model_name.split('_')[-1])\n\t\tself.joint_states = None\n\t\tself.fitness = None\t\t\n\n\n\t\tif self.model_name == 'MS_Faraday_d':\n\t\t\tself.link_reference = {'ML-0':'CORE','ML-1':'L2',\t'ML-2':'L3-1','ML-3':'L3-1',\n\t\t\t\t\t\t\t'ML-4':'L4','MR-5':'CORE','MR-6':'R2','MR-7':'R3-1',\n\t\t\t\t\t\t\t'MR-8':'R3-1','MR-9':'R4'}\n\t\telse:\n\t\t\tprint 'there is not link_reference of %s' % self.model_name\n\t\t\tsys.exit()\n\n\t\trospy.init_node('simulator%d' % self.dup_num)\n\t\t\n\t\t\n\t\trospy.Subscriber('/gazebo/link_states', LinkStates, self.state_getter)\n\n\t\t# spin() keeps Python from exiting until node is shutdown\n\t\tself.gazebo_exit()\n\t\tself.gazebo_init()\n\n\t\trospy.Subscriber('gene_pub', String, self.call_simulate)\n\t\ts=rospy.Service('sim_run%d' % self.dup_num, SimRun, self.fit_server)\n\n\t\t\n\t\trospy.spin()\n\n\nif __name__ == '__main__':\n\targv = rospy.myargv()\n\tmodel_name = argv[1]\n\tdup_num = argv[2]\n\n\n\tsim = simulator('%s'%model_name, dup_num)\n" }, { "alpha_fraction": 0.6415094137191772, "alphanum_fraction": 0.6582809090614319, "avg_line_length": 28.204082489013672, "blob_id": "aaa42a3b2b9694024e80621e695c1a85d9b7a76c", "content_id": "07d4e1bb1f08069cdbbb40341c1fe0abe393f1f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 107, "num_lines": 49, "path": "/src/spawn_model_client.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": 
"UTF-8", "text": "import sys\nimport os\n\nimport rospy\nfrom gazebo_msgs.srv import *\n\nclass pose :\n\tdef __init__(self, position, orientation):\n\t\tself.position = position\n\t\tself.orientation = orientation\n\nclass position :\n\tdef __init__(self, x, y, z):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\nclass orientation :\n\tdef __init__(self, x, y, z, w):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\t\tself.w = w\n\ndef spawn_model(model_name, model_xml, robot_namespace, initial_pose, reference_frame):\n rospy.wait_for_service('gazebo/spawn_sdf_model')\n \n try:\n # create a handle to the add_two_ints service\n spawn_model = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)\n \n # simplified style\n resp1 = spawn_model(model_name, model_xml, robot_namespace, initial_pose, reference_frame)\n # print(resp1.success)\n print(resp1.status_message)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\n# if __name__ == \"__main__\":\n# \tmodel_name1 = 'MR_bell_imu'\n# \tsdf_file = open('/home/seunghwanyu/catkin_ws/src/WalkYTo/models/MR_bell_imu/MR_bell_imu_model.sdf','r')\n# \tmodel_xml1 = sdf_file.read()\n# \trobot_namespace1 = 'MR_bell_imu'\n# \tposition1 = position(1,1,0)\n# \torientation1 = orientation(0, 0, 0, 0)\n# \tinitial_pose1 = initial_pose(position1, orientation1)\n# \treference_frame1 = ''\n\n# \tspawn_model(model_name1, model_xml1, robot_namespace1, initial_pose1, reference_frame1)\n" }, { "alpha_fraction": 0.5895196795463562, "alphanum_fraction": 0.6026200652122498, "avg_line_length": 18.95652198791504, "blob_id": "b0812441cad2192776131f7ee32293c9591c417e", "content_id": "d0a88734891636096ba212195f22a890f664e099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 36, "num_lines": 23, "path": "/src/analysis_tools/test.py", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "# import os, 
signal, time\n# import rospy\n\n# pid = os.fork()\n# if pid == 0:\n# \tos.execlp(\"roscore\", 'roscore')\n# else:\n# \tprint( \"Im waiting\")\n# \ttime.sleep(5)\n# \tos.kill(pid, signal.SIGINT)\n# \tprint(\"I'll kill you!\")\n# \tos.wait()\n# \tprint(\"done\")\n\ndef string_decoder(g_str, dup_num):\n\tg_str = str(g_str).split('/')\n\tprint g_str\n\tif len(g_str) > (dup_num-1):\n\t\treturn g_str[dup_num-1]\n\telse:\n\t\treturn -1\nfor i in range(5):\n\tprint string_decoder(input(\":\"), i)" }, { "alpha_fraction": 0.757709264755249, "alphanum_fraction": 0.7643171548843384, "avg_line_length": 18.782608032226562, "blob_id": "8ef514edcb8b58c7d652dab501508fed4b288273", "content_id": "903b4970fdb5f2c6a42cfc31344a0f196856bfd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 454, "license_type": "no_license", "max_line_length": 65, "num_lines": 23, "path": "/CMakeLists.txt", "repo_name": "CUN-bjy/WalkYTo", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(walkyto)\n\nfind_package(catkin REQUIRED COMPONENTS message_generation rospy)\n\ncatkin_python_setup()\n\nadd_service_files(\n DIRECTORY srv \n FILES SimRun.srv\n)\n\n## Generate services in the 'srv' folder\n# add_service_files(\n# FILES # e.g. Floats.srv HeaderString.srv\n#)\n\n## Generate added messages and services with any dependencies\ngenerate_messages()\n\ncatkin_package(\n CATKIN_DEPENDS message_runtime rospy\n)" } ]
12
Aries000004/pythonCrawl
https://github.com/Aries000004/pythonCrawl
3dca9d30a9fdf06431e819115afa4362dfc9ea76
855c306b5b0cd937ca53507d59bae41ff877650e
7deb7bc3d8918913db8709d6ff27a1f76d2177db
refs/heads/master
2020-03-25T07:58:41.740474
2018-08-01T05:37:52
2018-08-01T05:37:52
140,301,352
0
0
null
2018-07-09T14:57:43
2018-07-09T14:57:59
2018-08-01T02:15:20
Python
[ { "alpha_fraction": 0.600941002368927, "alphanum_fraction": 0.6146278977394104, "avg_line_length": 28.607595443725586, "blob_id": "1286650c73acc5dab86c8e2189c0d02f5da01500", "content_id": "a78d75d9d0646860224cd62dc32da7a99034efa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2422, "license_type": "no_license", "max_line_length": 96, "num_lines": 79, "path": "/django_rest_api/api/views.py", "repo_name": "Aries000004/pythonCrawl", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework.views import APIView\nfrom . import models\nfrom django.views.generic.base import View\nimport json\nfrom django.http import HttpResponse\nfrom rest_framework.request import Request\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import BasicAuthentication\n\n# Create your views here.\nORDER_DICT = {\n 1:{\n 'name':'apple',\n 'price':15\n },\n 2:{\n 'name':'dog',\n 'price':100\n }\n}\n\ndef md5(user):\n import hashlib\n import time\n ctime = str(time.time())\n m = hashlib.md5(bytes(user,encoding='utf-8'))\n m.update(bytes(ctime,encoding='utf-8'))\n\n return m.hexdigest()\n\n\n\nclass AuthView(View):\n def get(self,request):\n ret = {'code': 1000, 'msg': None}\n return JsonResponse(ret)\n def post(self,request,*args,**kwargs):\n ret = {'code':1000,'msg':None}\n try:\n user = request.POST.get('username')\n pwd = request.POST.get('password')\n obj = models.UserInfo.objects.filter(username=user,password=pwd).first()\n if not obj:\n ret['code'] = 1001\n ret['msg'] = u'็”จๆˆทๅๆˆ–ๅฏ†็ ้”™่ฏฏ'\n token = md5(user)\n models.UserToken.objects.update_or_create(user=obj,defaults={'token':token})\n ret['token'] = token\n except Exception as e:\n ret['code'] = 1002\n ret['msg'] = u'่ฏทๆฑ‚ๅผ‚ๅธธ'\n print(e)\n return HttpResponse(json.dumps(ret,ensure_ascii=False), content_type=\"application/json\")\n\nclass Authentication(APIView):\n 
'''่ฎค่ฏ'''\n def authenticate(self,request):\n token = request._request.GET.get('token')\n token_obj = models.UserToken.objects.filter(token=token).first()\n if not token_obj:\n raise exceptions.AuthenticationFailed('็”จๆˆท่ฎค่ฏๅคฑ่ดฅ')\n #ๅœจrest frameworkๅ†…้ƒจไผšๅฐ†่ฟ™ไธคไธชๅญ—ๆฎต่ต‹ๅ€ผ็ป™request๏ผŒไปฅไพ›ๅŽ็ปญๆ“ไฝœไฝฟ็”จ\n return (token_obj.user,token_obj)\n\n def authenticate_header(self, request):\n pass\n\n\nclass OrderView(APIView):\n authentication_classes = [Authentication,]\n def get(self,request,*args,**kwargs):\n ret = {'code':1000,'msg':None,'data':None}\n try:\n ret['data'] = ORDER_DICT\n except Exception as e:\n pass\n return JsonResponse(ret)" }, { "alpha_fraction": 0.35514017939567566, "alphanum_fraction": 0.37383177876472473, "avg_line_length": 21.29166603088379, "blob_id": "138f394113e5e61792b0e44c0016de4ce9826b3c", "content_id": "1f6e3da8f760edbb89b2bce529598880d5bc4061", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "no_license", "max_line_length": 52, "num_lines": 24, "path": "/others/huahua.py", "repo_name": "Aries000004/pythonCrawl", "src_encoding": "UTF-8", "text": "# with open(r'C:/test/110.txt', 'r') as f:\n# count = 0\n# for i in f.readlines():\n# res = i.split(' ')\n# # print(res)\n# item = []\n# for j in res:\n# if j:\n# j = j.replace('\\n','')\n# item.append(j)\n# # print(item)\n# if len(item)>2:\n# count +=1\n# print(\"'%s':'%s',\"%(item[0],item[-1]))\n# print(count)\ndef run():\n try:\n return '1'\n except:\n pass\n finally:\n return '2'\n\nprint(run())\n" }, { "alpha_fraction": 0.4749034643173218, "alphanum_fraction": 0.5366795659065247, "avg_line_length": 27.88888931274414, "blob_id": "304077d90de180f05383b6ca62203a7d7c080210", "content_id": "16bd0d288ddc33539e71db7354cdae23a224befe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 41, 
"num_lines": 9, "path": "/others/Auto_v3.py", "repo_name": "Aries000004/pythonCrawl", "src_encoding": "UTF-8", "text": "login_list ={\n 'tingting':'tingting123',\n 'gaolei':'gaolei123',\n 'caoxinpeng':'caoxinpeng123',\n 'wangzhibin':'wangzhibin123',\n 'hushan':'hushan123'\n }\nlogin = 'tingt1ing'\nprint(login in login_list)" }, { "alpha_fraction": 0.6815125942230225, "alphanum_fraction": 0.699999988079071, "avg_line_length": 31.189189910888672, "blob_id": "bfa113e91994d11100df7991dcfad56a858f851d", "content_id": "100f817ecee202c87e38a6bd8656b97f989b9719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 88, "num_lines": 37, "path": "/django_rest_api/api/models.py", "repo_name": "Aries000004/pythonCrawl", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom pygments.lexers import get_all_lexers\nfrom pygments.styles import get_all_styles\n# Create your models here.\n\nLEXERS = [item for item in get_all_lexers() if item[1]]\nLANGUAGE_CHOICE = sorted([(item[1][0],item[0]) for item in LEXERS])\nSTYLE_CHOICE = sorted((item,item)for item in get_all_styles())\n\n\nclass Snippet(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n title = models.CharField(max_length=100,blank=True,default='')\n code = models.TextField()\n lineos = models.BooleanField(default=False)\n language = models.CharField(choices=LANGUAGE_CHOICE,default='python',max_length=100)\n style = models.CharField(choices=STYLE_CHOICE,default='friendly',max_length=100)\n\n class Meta:\n ordering = ('created',)\n\n\nclass UserInfo(models.Model):\n USER_TYPE = (\n (1,'ๆ™ฎ้€š็”จๆˆท'),\n (2,'VIP'),\n (3,'SVIP')\n )\n user_type = models.IntegerField(choices=USER_TYPE)\n username = models.CharField(max_length=32)\n password = models.CharField(max_length=64)\n\n\n\nclass UserToken(models.Model):\n user = models.OneToOneField(UserInfo,on_delete=models.CASCADE)\n token = 
models.CharField(max_length=64)" } ]
4
resendislab/docker_mini_training
https://github.com/resendislab/docker_mini_training
126577099ed2bbc3ad4e72f83044c83c98d04deb
ce4d8b58bca0ee0cc2d27514d2f67f4d47316eeb
d907d3b721eac1ef61dbd97182728ba1bd789907
refs/heads/master
2021-06-16T17:32:26.246590
2017-05-26T23:34:13
2017-05-26T23:34:13
46,936,773
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.7568590641021729, "alphanum_fraction": 0.7691580057144165, "avg_line_length": 43.04166793823242, "blob_id": "cce5c0f473059ae830ca44e00b3592e4f4d4b40d", "content_id": "7f982ea34f4f761e26d4e496145290a5531ed258", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1057, "license_type": "permissive", "max_line_length": 125, "num_lines": 24, "path": "/README.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "![Docker logo](https://upload.wikimedia.org/wikipedia/commons/7/79/Docker_%28container_engine%29_logo.png)\n\n## Mini training\n\nHere you find the materials for my docker mini training.\n\n### Installation\n\nIf you want to run the live parts yourself you will need docker installed on your machine. There are detailed\ninstallation instructions for [Ubuntu](https://docs.docker.com/engine/installation/linux/ubuntu/),\n[Mac OSX](https://docs.docker.com/docker-for-mac/install/) and [Windows](https://docs.docker.com/docker-for-windows/install/)\non the docker home page. However, it is not necessary that you install docker\nfor the training, you can just drag along.\n\n### Material\n\n#### Intro to Docker\n\n1. [A very simple introduction to PaaS and Docker](https://speakerdeck.com/cdiener/docker-mini-training)\n2. [The docker command](docker_command.md)\n3. [Wrecking a Debian installation](wrecking_debian.md)\n4. [Docker files and automated builds](docker_builds.md)\n5. [Storage and docker volumes](docker_volumes.md)\n6. 
[The docker network stack](docker_networks.md)\n" }, { "alpha_fraction": 0.7020316123962402, "alphanum_fraction": 0.707110583782196, "avg_line_length": 20.349397659301758, "blob_id": "a32f9363eb2f869141ff165f939a4f2f56310b8e", "content_id": "7366b0f9d7ff5f5c35896b99335b0a264362a16b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1772, "license_type": "permissive", "max_line_length": 81, "num_lines": 83, "path": "/wrecking_debian.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "## Wrecking a Debian system\n\nOk, since we are now familiar with docker let's party! We will now spectacularly \ndestroy our Debian system. First we start a new Debian docker container with\n`docker run`.\n\n```bash\ndocker run -it --rm debian /bin/bash\n```\n\nMake sure you are inside your container (your prompt should contain some weird\nnumbers, the ID of the container). In my case it look like this:\n\n```bash\nroot@4f3b533be9c3:/# \n```\n\nOkay, let's go. HULK SMASH!\n\n***Disclaimer: Do NOT try that at home (or better \"at host\").*** You should not \nhave permissions to run the following commands in your host, but better safe than\nsorry. Make sure you are inside the docker container (for instance run apt).\n\nSee all those nice basic programs in /bin?\n\n```bash\nls /bin\n```\n\nUuuuh that nice `date` program...\n\n```bash\ndate\n```\n\nWe will smash it!\n\n```bash\necho \"echo \\\"No time for you!\\\"\" > /bin/date\n```\n\nNow try to run it...\n\n```bash\ndate\n```\n\nUuuuh, what are you gonna do? Run crying to your Mummy?\nHULK SMASH MORE! Okay now let's try to delete everything!\n\n```bash\nrm -rf /\n```\n\nWhat do you think will happen? Nothing? 
Right, Debian will not let us delete the\nentire OS, but it happily tells us how, so let's try the following one\n\n```bash\nrm -rf / --no-preserve-root\n```\n\nOkay that works but gives some errors that even as root we can not delete some\ndata in /sys. This is because sys does not only contain files but also hardware\naccess. Docker does never allow access to those parts of the host.\n\nWell, now we are in a pickle. Most commands we know are gone, just try\n\n```bash\ncp\nls\nmkdir\n```\n\nHmm, that's weird. Some of the commands are still there!\n\n```bash\necho \"Ha! Puny Hulk cannot kill me!\"\ncd sys\ncd ..\n[ 3 > 2 ] && echo \"TRUE!\" \n```\n\nCan you explain what happened?\n" }, { "alpha_fraction": 0.7518467903137207, "alphanum_fraction": 0.7649794816970825, "avg_line_length": 37.47368240356445, "blob_id": "84c1022be80f40221f6428d3d632eefa75ab69ab", "content_id": "0cd93fb5df0689566536b4690c308049d4130573", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3655, "license_type": "permissive", "max_line_length": 150, "num_lines": 95, "path": "/docker_networks.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "# Networking in docker\n\nNetworking in docker is pretty powerful even though most people never want to\nget beyond \"-p 8000:8000\" :D In fact, docker manges it's own network stack and\na fresh docker installation comes with three configured networks out of the\nbox...\n\nNetworks in docker can be managed with the `network` subcommand. 
For instance\nwe can list all current networks:\n\n```bash\ndocker network ls\n```\n\nHere we see the three default networks:\n\n- \"bridge\": An isolated network in which containers can only see each other.\n- \"host\": The network of our host.\n- \"none\": A mock network in which each container is completely isolated and only\n sees itself.\n\nSo we will now use a docker image that actually runs a web app so we can see\nwhat happens in the network. We will run a jupyter notebook server (without\na password for now).\n\n```bash\ndocker pull jupyter/minimal-notebook\ndocker run -d jupyter/minimal-notebook start-notebook.sh --NotebookApp.token=''\ndocker ps\n```\n\nSo we see the container exposes the port 8888. But how do we get access to\nthat? By default the container runs in the bridge network. We can \"publish\"\nthe app to our host with the `-p` option of docker run. The syntax is `host_port:container_port` (see, again \"outside:inside\"). Let's kill the app and\ntry again:\n\n```bash\ndocker rm -f gigantic_brown\n\ndocker run -d -p 8888:8888 jupyter/minimal-notebook start-notebook.sh --NotebookApp.token=''\ndocker ps\n```\n\nOpen you browser at 0.0.0.0:8888 and *tada* the app is running (if you use the\ndocker toolbox you need to use the IP of your docker vm instead). So this is\nalready pretty powerfull since we could run several instances of the app and\npublish it two different IPs on the host. But we don't have to do this at all.\n\nThe bridge network in docker has it's own IP range. We can see that with the\n`inspect` command.\n\n```bash\ndocker network inspect bridge\n```\n\nThis gives us the current network configuration. Can you see the entry for our\njupyter container? It actually has its own IP in the bridge network\n(172.17.0.2 in my case). So now open your browser at 172.17.0.2:8888 and *tada*\nour app is there again. When using the bridge network IP you would not even\nneed to use the \"publish\" option `-p`. 
So yeah, you get an entire emulated network\nstack in which you can interconnect apps as you wish, but whatever :D You can\nstop and remove the jupyter server now.\n\nIt can actually get better. We can also create new networks to connect a specific\nsubset of containers. This is done with the `create` sub-subcommand.\n\n```bash\ndocker network create my_net\ndocker network ls\n```\n\nSo we see we have created a new network. We can launch containers into our new\nnetwork using the `--network` option of docker run. We will launch two\ncontainers: a jupyter app server and a simple debian one.\n\n```bash\ndocker run -d --name= jupyter --network=my_net jupyter/minimal-notebook start-notebook.sh --NotebookApp.token=''\ndocker run -it --rm --name=debian --network debian\n```\n\nNote how we gave names to both containers. The cool thing about custom bridge\nnetworks is that they automatically configure the DNS to use the container\nnames. So in the Debian conatiner we can now simply do\n\n```bash\nping -w 4 jupyter\nping -w 4 debian\n```\n\nThis is pretty neat and can be used to setup complex network stacks on a single\nmachine. Containers may contain to several networks at once so you can set up\ncomplex layered network architectures as well. Docker also has olverlay\nnetworks which can connect several physical hosts into a single network. 
However,\nif you want that functionality you should probably look into Kubernetes or\nDocker Swarm.\n" }, { "alpha_fraction": 0.676300585269928, "alphanum_fraction": 0.676300585269928, "avg_line_length": 23.714284896850586, "blob_id": "a153aca6eade91cca8d0c97cfcdba024c26b150b", "content_id": "3c232c56c0729c5825ced5f672a63a97d7244e92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 173, "license_type": "permissive", "max_line_length": 50, "num_lines": 7, "path": "/Dockerfile", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "FROM debian\nMAINTAINER \"Weird friend\"\n\nRUN apt-get -y update && apt-get -y install cowsay\nRUN cd /bin && ln -s /usr/games/cowsay \n\nENTRYPOINT [\"/bin/cowsay\", \"-f\", \"vader\"]\n" }, { "alpha_fraction": 0.7584163546562195, "alphanum_fraction": 0.7584163546562195, "avg_line_length": 36.50505065917969, "blob_id": "bf7414223e52e0dde30a05f67c2f70f56a539973", "content_id": "b33427466404f73fd166ed176cb5a15322850c46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3713, "license_type": "permissive", "max_line_length": 96, "num_lines": 99, "path": "/docker_builds.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "## Dockerfiles and builds\n\nOkay, we now have some basic grasp of how to run docker. However, we still do not\nknow how images are **actually** created. So let us build one. We have seen before\nthat we can run our Debian container and install some software to it. We also\nnow that without the `--rm` flag the container will persist. That is a good\nstart let us use this.\n\nBy now we are huge fans of the `cowsay` program and want the entire world to be\nbe able to express themselves through a cow. That is why we want an official\ncowsay docker image out there. 
So let us start by running our Debian image\nagain and installing cowsay. We will also link cowsay directly into /bin because\nin our Docker container cows are first class citizens.\n\n```bash\ndocker run -it debian /bin/bash\n\n# Now inside the container\napt update && apt install cowsay\ncd /bin && ln -s /usr/games/cowsay\n# Quit with Ctrl-D\n```\n\nBy the way you can get back into the shell of the container with\n`docker start -ia cute_name`.\n\nSo now we have a container with cowsay installed. But how do we create an image\nfrom that? Easy peasy, with `docker commit`.\n\n```bash\ndocker commit -a \"My Name\" cute_name cowsay\n```\n\nThe `-a` flag allows you to define a mantainer for the image and the last argument\nto the command is the name of the image. If you had a (free) Docker Hub account you\ncould now send the image to docker hub with `docker push`. Well done!\n\nNow, you have that friend who is a huge fan of Star Wars and cows and wants to\nchange your image to have a Darth Vader cow saying things. You do not want to change\nyour image but your friend has no idea how to install stuff in Debian. Wouldn't\nit be peachy if you could create some small file with build instructions that\nyour friend could modify to build his own cowsay image? Well that is what\nDockerfiles are for! We will not go into too much details, because you can easily\nread all there is to know about Dockerfiles at http://docs.docker.com/engine/reference/builder/.\n\nSo you send your friend the following Dockerfile:\n\n```Dockerfile\nFROM debian\nMAINTAINER \"Your Name\"\n\nRUN apt-get -y update && apt-get -y install cowsay\nRUN cd /bin && ln -s /usr/games/cowsay\n\nENTRYPOINT [\"/bin/cowsay\"]\n```\n\n`FROM` defines the base image you want to use and `RUN` runs any command you want\ninside of the docker container. There will be now interactive shell so all commands\nhave to run without confirmations. 
This is why we use `apt-get -y` which replies\n\"yes\" to all necessary confirmations. The `ENTRYPOINT` defines a default command for the\ncontainer so that having your cow say \"Hello!\" is as easy as `docker run --rm cowsay \"Hello!\"`.\n\nYour friend can now easily build his own image by changin into the directory\nwith the Dockerfile and typing\n\n```bash\ndocker build -t vadercow .\n```\n\nThis will pull all the necessary images and build his vadercow image. Your friend\nis happy to see that the following works\n\n```\ndocker run --rm vadercow \"I am still a cow\"\n```\n\nHe makes the following adjustment to the build file\n\n```Dockerfile\nFROM debian\nMAINTAINER \"Weird friend\"\n\nRUN apt-get -y update && apt-get -y install cowsay\nRUN cd /bin && ln -s /usr/games/cowsay\n\nENTRYPOINT [\"/bin/cowsay\", \"-f\", \"vader\"]\n```\nand is delighted to see his Vader cow when running\n\n```bash\ndocker run --rm vadercow \"Hello Weird Friend!\"\n```\n\nDockerfiles are not only practical to define standardize images but they can also\nbe used for automated builds. 
You can connect Dockerfile from a Github repository\nto your Docker Hub account so that upon any changes in your Dockerfile the\nDocker Hub will build your image on their servers and provide it for downloads.\nFor an example see https://hub.docker.com/r/cdiener/cobra-docker/builds.\n" }, { "alpha_fraction": 0.7516495585441589, "alphanum_fraction": 0.7529325485229492, "avg_line_length": 35.132450103759766, "blob_id": "1d45e9ccd26b72874edb032f6a94af25aa5b530f", "content_id": "a61e20f6fcb5b6954876b71c32c439e43fa67e02", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5456, "license_type": "permissive", "max_line_length": 86, "num_lines": 151, "path": "/docker_command.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "## The docker command\n\nDocker itself is mostly managed through one monolithic command called `docker`\n(surprise!). There are some features of docker which can be misused. For that\nreason docker usually requires root privileges to be run. If you use the MacOS\nor Windows installation of docker you will not need to do anything. On those\nplatforms docker itself runs in a small (tiny, tiny, tiny) virtual machine which\nhas no connection to the host, so there is no way you could break it (so you can\nbe root there). If you installed Docker on linux you will either need to prefix\nall the commands with `sudo` or your use must be added to the `docker` group.\nThis can be achieved in Ubuntu/Debian with (requires restart of the docker service)\n\n```bash\nsudo useradd -aG docker username\n```\nHowever, careful with that! You should consider any user in the docker group\na super-user. From now on we will assume you have the rights to use docker. If\nany of the following commands give you a permission of missing socket error\njust prefix `sudo`.\n\n## Getting started\n\nFirst let's check if the installation went fine. 
On MacOS and Windows look\nfor the Docker Quick Start Terminal and fire it up. On Linux, open a Terminal\nand type `docker` (or `sudo docker`). This will give you the following output:\n\n```\nUsage: docker [OPTIONS] COMMAND [arg...]\n docker daemon [ --help | ... ]\n docker [ --help | -v | --version ]\n\nA self-sufficient runtime for containers.\n\nOptions:\n...\n\nCommands:\n attach Attach to a running container\n build Build an image from a Dockerfile\n...\n volume Manage Docker volumes\n wait Block until a container stops, then print its exit code\n\nRun 'docker COMMAND --help' for more information on a command.\n```\n\nSo you can see that there is quite a lot that can be done with the `docker`\ncommand usually by using `docker subcommand`.\n\nThere are two important concepts we have to grasp in Docker, images and containers.\nWe have already met containers in introduction. They are small isolated parts on\nyour file system that contain a small operating system together with its own\nfile system (called \"volume\" in docker). An image can be seen as a rule how\na container should look like. It is an immutable snapshot of the default configuration\nof the container. As such an image is something you build once. From each image you\ncan generate an infinite number of containers in which you run stuff or do whatever\nyou want. So TL;DR: we create containers from images. Docker provides an online\nrepository at https://hub.docker.com of many images (>600.000 atm). You can download\nany of the images with `docker pull`. So we will now download a minimal image\nof the latest Debian 8 (Jessie).\n\n```bash\ndocker pull debian\n```\n\nAfter downloading let's check whether we got the image with\n\n```bash\ndocker images\n```\n\nJup, that is our Debian image. See how small it is? This is because docker uses\nthe kernel of the host system. So there is no need for the kernel, drivers or\nany boot infrastructure. 
To create a container from an image and run it we use\n`docker run`. The `run` command requires at least two things: the name of an image\nand the command to run within the container. So let us run the bash shell in our\nbrand new debian image (you might have to press Enter again after running).\n\n```bash\ndocker run debian /bin/bash\n```\n\nOkay, that just returns us to our shell. What happened. Well, maybe the container\nis running in the background? We can use `docker ps` to see all running containers.\n\n```bash\ndocker ps\n```\n\nNope, nothing here. Okay, let's try the `-a` flag which allows us to see all\nliving or dead containers.\n\n```bash\ndocker ps -a\n```\n\nHey here is our container! And it even got a nice random name! But why does it\nsay exited? Well, we started bash in a non-interactive mode. So bash will run\nall given commands (none in our case) and exit. Pffffff, what a disappointment.\nNow we have that large useless container on our disk. Let's get rid of it. Do\nyou remember the cute name? Use it!\n\n```bash\ndocker rm -v cute_name\ndocker ps -a\n```\n\nOkay, it's gone. Did you notice the `-v` option? By default `docker rm` will\nremove the container but not the file system (the *v*olume). With `-v` we tell\ndocker to delete the volume as well and prevents pollution of our file system.\n\nWell, let's try again with our Debian. This time we will add two option flags\n`-it` to run the container in interactive pseudo-TTY mode, and `--rm` to automatically\ndelete the container and its volumes upon exiting.\n\n```bash\ndocker run -it --rm debian /bin/bash\n```\n\nThat drops you into the bash shell of the Debian container. Yeiih. Well let us\nfirst check whether we are really working with Debian here. In that shell let us\ninstall some stuff.\n\n```bash\napt update\napt install screenfetch cowsay\n```\n\nOkay, that looks like the Debian package manager. 
Let us verify for sure with\nscreenfetch.\n\n```bash\nscreenfetch\n```\n\nOr do you rather trust cows?\n\n```bash\n/usr/games/cowsay \"Yep it's Debian! MOOOOOOOO!\"\n```\n\nDid you notice that you did not need to type `sudo` to install packages? In\na docker container you are usually root. Finally, to get out of the shell you can\nuse Ctrl+D. And you're back in your host system.\n\nLet's see what we have learned.\n\n- controlling Docker with the `docker` command\n- getting images from Docker Hub\n- running and deleting containers\n- getting a list of available images, running/stopped containers\n" }, { "alpha_fraction": 0.7618743777275085, "alphanum_fraction": 0.7625119686126709, "avg_line_length": 32.73118209838867, "blob_id": "ce22d3b6d2a62e329e5d6df580060ee97b36720b", "content_id": "c36083406cc5fb269bb34acf008ff0ae38d7a46c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3137, "license_type": "permissive", "max_line_length": 80, "num_lines": 93, "path": "/docker_volumes.md", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "# Data persistence with Docker volumes\n\nDocker manges persistent data in volumes. Basically, there are two kinds of\nvolumes:\n\n1. managed volumes that are the used by your containers and that have some\n layer management to reduce disk usage\n2. unmanaged volumes that can be shared between containers or containers and\n the host and should be used for storing large objects\n\nOne of the common pitfalls when using docker is assuming that your data in\ncontainers is safe. Data in conatiners itself should be assumed short lived\nsuch as log files etc. Let's illustrate this.\n\n```bash\ndocker run -it debian\nmkdir /data && cd data\necho \"It's alive!\" > frankenstein.txt\ncat frankenstein.txt\nexit\n```\n\nWhere is the file now? Well actually it's not lost. 
As long as we don't delete\nthe container the managed file system is still there and we could recover it.\n\n```bash\ndocker start prickly_perlman # That's the name I got\ndocker attach prickly_perlman\ncat /data/frankenstein.txt\nexit\ndocker rm prickly_perlman\n```\n\nHowever in the moment we remove the container the data is gone. A much nicer\nway is to create a custom volume for the data. A volume is a data container and\nusually simply a pretty thin wrapper arounf your local file system. However, it\nmay use a myriad of different volume drivers, for instance it could also be\na file on Hadoop, data in the cloud and many more. Volumes in docker can be\nmanaged with the `volume` subcommand.\n\n```bash\ndocker volume ls\n```\n\nCurrently, there is nothing. So let's create something. Since volumes are just\ndata without any API you will need a container to write to it. Volume mapping\nin docker is realized with the `-v` option and has the syntax\n`name_or_path:path_in_container` (the `outside:inside` is pretty common in\ndocker). For now we will only give a name which will create a new volume on\nour local disk.\n\n```bash\ndocker run -it --rm -v my_data:/data debian\ncd data\necho \"It's alive!\" > frankenstein.txt\nexit\ndocker rm name\n\ndocker volume ls\n```\n\nAs we see we have created a new data volume and our data is kept safe in there.\nWe can attack the data volume to any one or even several containers afterwards.\n\n```bash\ndocker run -it --rm -v my_data:/data debian\ncat /data/frankenstein.txt\n```\n\nSharing volumes is a pretty cool feature. For instance you can have many\ncontainers write log files or backups to the same location. You can remove\nvolumes with `docker volume rm`\n\n```bash\ndocker volume rm my_data # all clean now :D\n```\n\nInstead of using data volumes you can also directly map locations between\nthe host and the container. 
Just let the \"inside\" part be a complete\n**existing** path.\n\n```bash\nmkdir test\ndocker run -it --rm -v /home/my_user/test:/test debian\necho \"I'm flying through walls booooooo\" > /test/ghost.txt\nexit\n```\n\nOkay, the file is also mirrored on the host. So now use `ls` to see who the\nfile belongs to... Wow, it's from `root`. This is why volume mapping is probably\none of dockers most dangerous feature. In a mapped volume you may be root and\ncan edit or delete at your leisure. This is why you should consider everybody\nin the \"docker\" group to be an admin.\n" }, { "alpha_fraction": 0.4795764088630676, "alphanum_fraction": 0.48562783002853394, "avg_line_length": 26.54166603088379, "blob_id": "9b848312271de26f182b94189b1055a26adcc747", "content_id": "46794ece95083373983f4adae9f02d510ef3228c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "permissive", "max_line_length": 77, "num_lines": 24, "path": "/mdcode.py", "repo_name": "resendislab/docker_mini_training", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\n\n\ndef extract(filename):\n incode = False\n count = 1\n with open(filename) as mdfile, open(filename + \".code\", 'w') as codefile:\n for l in mdfile:\n if l.find(\"```\") > -1:\n incode = not incode\n if incode:\n codefile.write(\"### Block {} ###\\n\".format(count))\n count += 1\n else:\n codefile.write(\"\\n\")\n elif incode:\n codefile.write(l)\n\n\nif __name__ == \"__main__\":\n success = [extract(f) for f in sys.argv]\n print(\"Extracted code for all markdownfiles into file.md.code.\")\n" } ]
8
universalmind303/randomquote
https://github.com/universalmind303/randomquote
768f61cd2fe6edc44093be1182f08fe987211e83
a01da525e9b9599b33f5fff285037ec78d7a409b
41a039fdac9741a89074723ae56386f72a63bd21
refs/heads/master
2021-01-20T12:55:52.335856
2017-05-06T01:51:57
2017-05-06T01:51:57
90,429,888
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6061997413635254, "alphanum_fraction": 0.6107922196388245, "avg_line_length": 20.799999237060547, "blob_id": "9005d9cd7d0604aea7f20499878c0ce1a1c5bd62", "content_id": "a6f1c5c247b6d90abc0e9d0b7250dec57b946530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 871, "license_type": "no_license", "max_line_length": 57, "num_lines": 40, "path": "/src/app/app.component.ts", "repo_name": "universalmind303/randomquote", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from \"@angular/core\";\nimport axios from \"axios\"\nimport { Compiler } from '@angular/core';\n\n@Component({\n selector: \"app\",\n template: require(\"./app.component.html\"),\n styles: [require(\"./app.component.scss\")]\n})\nexport class AppComponent implements OnInit {\n private http = axios\n quote: string;\n title: string;\n constructor(private compiler: Compiler) {}\n\n async quoteGen() {\n try {\n let res = await this.http({\n method: 'get',\n headers: {'Cache-Control': 'no-cache'},\n url: 'http://localhost:5000/api/getquotes?random'\n })\n console.log(JSON.stringify(res.data))\n console.log(res.data)\n this.title = res.data.title\n this.quote = res.data.content\n }\n catch(e) {\n console.log(e)\n }\n }\n ngOnInit() {\n this.quoteGen()\n\n }\n newQuote() {\n this.quoteGen()\n }\n\n}" }, { "alpha_fraction": 0.7614678740501404, "alphanum_fraction": 0.7614678740501404, "avg_line_length": 26.375, "blob_id": "1bf15175cd92ee1571ed723875bbede78b0875c9", "content_id": "5afcaa53c2fd3ccc02dd8e7a4f79fe39711bf35a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 218, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/src/vendor.ts", "repo_name": "universalmind303/randomquote", "src_encoding": "UTF-8", "text": "import \"jquery\";\nimport \"bootstrap-loader\";\nimport \"font-awesome-loader\";\nimport 
\"@angular/platform-browser\";\nimport \"@angular/platform-browser-dynamic\";\nimport \"@angular/core\";\nimport \"@angular/common\";\nimport 'axios'" }, { "alpha_fraction": 0.5247252583503723, "alphanum_fraction": 0.541208803653717, "avg_line_length": 25.071428298950195, "blob_id": "283c2ed591ac17e266cc656b9f03d08fed5cb260", "content_id": "fa0a6fd366166908c733a20a55f72bb70f27f212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JSON with Comments", "length_bytes": 364, "license_type": "no_license", "max_line_length": 93, "num_lines": 14, "path": "/tsconfig.json", "repo_name": "universalmind303/randomquote", "src_encoding": "UTF-8", "text": "{\n \"compilerOptions\": {\n \"module\": \"commonjs\",\n \"target\": \"es6\",\n \"moduleResolution\": \"node\",\n \"experimentalDecorators\" : true,\n \"emitDecoratorMetadata\": true/*,\n \"strictNullChecks\": true*/ //waiting on https://github.com/angular/angular/pull/14679\n },\n \"exclude\": [\n \"node_modules\",\n \"dist\"\n ]\n}" }, { "alpha_fraction": 0.6740685701370239, "alphanum_fraction": 0.677276074886322, "avg_line_length": 41.21875, "blob_id": "f4e835ea81a125cded8df13612e3da83c378c15b", "content_id": "ac21abf083c74886cc9775bc759ec041795ee118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4053, "license_type": "no_license", "max_line_length": 240, "num_lines": 96, "path": "/server/index.py", "repo_name": "universalmind303/randomquote", "src_encoding": "UTF-8", "text": "from flask import Flask, send_file, jsonify, request, render_template\nfrom flask_cors import CORS, cross_origin\nfrom flask_pymongo import PyMongo\nimport os\nimport pprint\nimport random\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\napp = Flask(__name__)\nmongo = PyMongo(app)\n\[email protected](\"/\")\ndef send_index():\n return render_template(\"static/dist/index.html\")\n\[email protected](\"/api/addquote\", 
methods=['POST'],)\ndef add_quote():\n try:\n title = request.json['title']\n content = request.json['content']\n mongo.db.quotes.insert_one({\n 'title':title, 'content': content\n })\n return jsonify(status=\"OK\", message=\"inserted successfully\")\n except Exception,e:\n return jsonify(status=\"ERROR\", message=str(e))\n\[email protected](\"/api/getquotes\", methods=[\"GET\"])\n@cross_origin()\ndef get_quotes():\n try:\n qouteColl = []\n for quotes in mongo.db.quotes.find():\n qouteColl.append({\"id\": str(quotes['_id']), \"title\": quotes['title']}) \n\n if(request.query_string == 'random'):\n return jsonify(random.choice(qouteColl))\n return jsonify(qouteColl)\n except Exception as e:\n return jsonify(status=\"ERROR\", message=str(e))\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Return a custom 404 error.\"\"\"\n return 'Sorry, nothing at this URL.', 404\n\n\n# make sure this gets disabled before deploying\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nrandomlist = [\n{'title':\"Will Estes\", 'content': \"I'm into all that sappy stuff - a surprise picnic, nice dinner, or traveling. I'm kind of an old romantic.\"},\n{'title':\"Dr. Seuss\", 'content': \"Don't cry because it's over, smile because it happened.\"},\n{'title':\"Marilyn Monroe\", 'content': \"I'm selfish, impatient and a little insecure. I make mistakes, I am out of control and at times hard to handle. \\\nBut if you can't handle me at my worst, then you sure as hell don't deserve me at my best.\" },\n{\"title\": \"Oscar Wilde\", \"content\": \"Be yourself; everyone else is already taken.\"},\n{\"title\": \"Albert Einstein\", \"content\": \"Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.\"},\n{\"title\": \"Bernard M. 
Baruch\", \"content\": \"Be who you are and say what you feel, because those who mind don't matter, and those who matter don't mind.\"},\n{\"title\": \"Frank Zappa\", \"content\": \"So many books, so little time.\"},\n{\"title\": \"William W. Purkey\", \"content\": \"You've gotta dance like there's nobody watching, \\\nLove like you'll never be hurt,\\\nSing like there's nobody listening,\\\nAnd live like it's heaven on earth.\"},\n{\"title\": \"Marcus Tullius Cicero\", \"content\": \"A room without books is like a body without a soul.\"},\n{\"title\": \"Dr. Seuss\", \"content\": \"You know you're in love when you can't fall asleep because reality is finally better than your dreams.\"},\n{\"title\": \"Mae West\", \"content\": \"You only live once, but if you do it right, once is enough.\"},\n{\"title\": \"Mahatma Gandhi\", \"content\": \"Be the change that you wish to see in the world.\"},\n{\"title\": \"Robert Frost\", \"content\": \"In three words I can sum up everything I've learned about life: it goes on.\"},\n{\"title\": \"J.K. Rowling, Harry Potter and the Goblet of Fire\", \"content\": \"If you want to know what a man's like, take a good look at how he treats his inferiors, not his equals.\"},\n{\"title\": \"Daniel Readon\", \"content\": \"In the long run, the pessimist may be proven right, but the optimist has a better time on the trip.\"},\n{\"title\": \"-Maureen Reagan\", \"content\": \"I will feel equality has arrived when we can elect to office women who are as unqualified as some of the men who are already there.\"},\n{\"title\": \"Unknown\", \"content\": \"Life is not a journey to the grave with the intention of arriving safely in a pretty and well preserved body, but rather to skid in broadside, thoroughly used up, totally worn out, and loudly proclaiming \"},\n{\"title\": \"Unknown\", \"content\": \"Enjoy life. There's plenty of time to be dead.\"},\n{\"title\": \"Proverb\", \"content\": \"A light heart lives long.\"},\n]\n" } ]
4
mraltuntass/FieldSAFE
https://github.com/mraltuntass/FieldSAFE
0cc785e7a17e8867de7421003a5ce899cdb60366
fbcb8b6829663e2b5fd41d1a05709be237cf0f85
50fd33d45edc1507be11392fb127d095ed979372
refs/heads/master
2022-12-17T17:33:15.398471
2020-09-25T12:03:26
2020-09-25T12:03:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6678845882415771, "alphanum_fraction": 0.703672468662262, "avg_line_length": 47.51818084716797, "blob_id": "70ad091bdbcef91cf539b0887ec1cc3ea9868413", "content_id": "ae3213cac5191e4e42e14ac90be91fdfb3f8e305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 21348, "license_type": "no_license", "max_line_length": 259, "num_lines": 440, "path": "/ros/src/htf_delphi_esr/src/radar_delphi_visualization_node.cpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "/****************************************************************************\n# Copyright (c) 2016, author Mikkel Kragh Hansen\n# [email protected]\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name FroboMind nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n ****************************************************************************/\n\n\n#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include <sstream>\n\n// ROS\n#include <visualization_msgs/Marker.h>\n#include <visualization_msgs/MarkerArray.h>\n#include <geometry_msgs/Point.h>\n#include \"htf_delphi_esr/Delphi_radar.h\"\n\n#define PI 3.141592653589793f\n\nstruct color\n{\n\tfloat r;\n\tfloat g;\n\tfloat b;\n};\ncolor colors[16] = {{1,0,0},{0,1,0},{0,0,1},{0,0.4470,0.7410},{0.8500,0.3250,0.0980},{0.9290,0.6940,0.1250},{0.4940,0.1840,0.5560},{0.4660,0.6740,0.1880},{0.3010,0.7450,0.9330},{0.6350,0.0780,0.1840},{1,1,0},{1,0,1},{0,1,1},{0.5,0.5},{0.5,0,0.5},{0,0.5,0.5}};\n\nros::Publisher pubVisualization;\ndouble pitch_angle,height;\n\nstruct radar_data\n{\n\tunsigned char CAN_TX_F_VALID_DETECTION_LL;\n\tunsigned char CAN_TX_DETECT_STATUS_LL;\n\tdouble CAN_TX_DET_AMPLITUTDE_LL;\n\tdouble CAN_TX_DET_ANGLE_LL;\n\tdouble CAN_TX_DET_RANGE_LL;\n\tdouble CAN_TX_DET_RANGE_RATE_LL;\n\tunsigned char CAN_TX_F_VALID_DETECTION_ML;\n\tunsigned char CAN_TX_DETECT_STATUS_ML;\n\tdouble CAN_TX_DET_AMPLITUTDE_ML;\n\tdouble CAN_TX_DET_ANGLE_ML;\n\tdouble CAN_TX_DET_RANGE_ML;\n\tdouble CAN_TX_DET_RANGE_RATE_ML;\n};\n\nstd::vector<radar_data> msg2array(const htf_delphi_esr::Delphi_radarConstPtr msg)\n{\n\tstd::vector<radar_data> data_array;\n\tradar_data data;\n\tfor (int i=0;i<16;i++)\n\t\tdata_array.push_back(data);\n\n\t// Target 
1\n\tdata_array[0].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_1;\n\tdata_array[0].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_1;\n\tdata_array[0].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_1;\n\tdata_array[0].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_1;\n\tdata_array[0].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_1;\n\tdata_array[0].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_1;\n\tdata_array[0].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_1;\n\tdata_array[0].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_1;\n\tdata_array[0].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_1;\n\tdata_array[0].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_1;\n\tdata_array[0].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_1;\n\tdata_array[0].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_1;\n\n\t// Target 2\n\tdata_array[1].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_2;\n\tdata_array[1].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_2;\n\tdata_array[1].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_2;\n\tdata_array[1].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_2;\n\tdata_array[1].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_2;\n\tdata_array[1].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_2;\n\tdata_array[1].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_2;\n\tdata_array[1].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_2;\n\tdata_array[1].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_2;\n\tdata_array[1].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_2;\n\tdata_array[1].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_2;\n\tdata_array[1].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_2;\n\n\t// Target 3\n\tdata_array[2].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_3;\n\tdata_array[2].CAN_TX_DETECT_STATUS_LL = 
msg->CAN_TX_DETECT_STATUS_LL_3;\n\tdata_array[2].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_3;\n\tdata_array[2].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_3;\n\tdata_array[2].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_3;\n\tdata_array[2].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_3;\n\tdata_array[2].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_3;\n\tdata_array[2].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_3;\n\tdata_array[2].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_3;\n\tdata_array[2].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_3;\n\tdata_array[2].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_3;\n\tdata_array[2].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_3;\n\n\t// Target 4\n\tdata_array[3].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_4;\n\tdata_array[3].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_4;\n\tdata_array[3].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_4;\n\tdata_array[3].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_4;\n\tdata_array[3].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_4;\n\tdata_array[3].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_4;\n\tdata_array[3].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_4;\n\tdata_array[3].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_4;\n\tdata_array[3].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_4;\n\tdata_array[3].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_4;\n\tdata_array[3].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_4;\n\tdata_array[3].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_4;\n\n\t// Target 5\n\tdata_array[4].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_5;\n\tdata_array[4].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_5;\n\tdata_array[4].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_5;\n\tdata_array[4].CAN_TX_DET_ANGLE_LL = 
msg->CAN_TX_DET_ANGLE_LL_5;\n\tdata_array[4].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_5;\n\tdata_array[4].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_5;\n\tdata_array[4].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_5;\n\tdata_array[4].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_5;\n\tdata_array[4].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_5;\n\tdata_array[4].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_5;\n\tdata_array[4].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_5;\n\tdata_array[4].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_5;\n\n\t// Target 6\n\tdata_array[5].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_6;\n\tdata_array[5].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_6;\n\tdata_array[5].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_6;\n\tdata_array[5].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_6;\n\tdata_array[5].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_6;\n\tdata_array[5].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_6;\n\tdata_array[5].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_6;\n\tdata_array[5].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_6;\n\tdata_array[5].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_6;\n\tdata_array[5].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_6;\n\tdata_array[5].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_6;\n\tdata_array[5].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_6;\n\n\t// Target 7\n\tdata_array[6].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_7;\n\tdata_array[6].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_7;\n\tdata_array[6].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_7;\n\tdata_array[6].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_7;\n\tdata_array[6].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_7;\n\tdata_array[6].CAN_TX_DET_RANGE_RATE_LL = 
msg->CAN_TX_DET_RANGE_RATE_LL_7;\n\tdata_array[6].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_7;\n\tdata_array[6].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_7;\n\tdata_array[6].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_7;\n\tdata_array[6].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_7;\n\tdata_array[6].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_7;\n\tdata_array[6].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_7;\n\n\t// Target 8\n\tdata_array[7].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_8;\n\tdata_array[7].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_8;\n\tdata_array[7].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_8;\n\tdata_array[7].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_8;\n\tdata_array[7].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_8;\n\tdata_array[7].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_8;\n\tdata_array[7].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_8;\n\tdata_array[7].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_8;\n\tdata_array[7].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_8;\n\tdata_array[7].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_8;\n\tdata_array[7].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_8;\n\tdata_array[7].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_8;\n\n\t// Target 9\n\tdata_array[8].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_9;\n\tdata_array[8].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_9;\n\tdata_array[8].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_9;\n\tdata_array[8].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_9;\n\tdata_array[8].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_9;\n\tdata_array[8].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_9;\n\tdata_array[8].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_9;\n\tdata_array[8].CAN_TX_DETECT_STATUS_ML = 
msg->CAN_TX_DETECT_STATUS_ML_9;\n\tdata_array[8].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_9;\n\tdata_array[8].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_9;\n\tdata_array[8].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_9;\n\tdata_array[8].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_9;\n\n\t// Target 10\n\tdata_array[9].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_10;\n\tdata_array[9].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_10;\n\tdata_array[9].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_10;\n\tdata_array[9].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_10;\n\tdata_array[9].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_10;\n\tdata_array[9].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_10;\n\tdata_array[9].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_10;\n\tdata_array[9].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_10;\n\tdata_array[9].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_10;\n\tdata_array[9].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_10;\n\tdata_array[9].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_10;\n\tdata_array[9].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_10;\n\n\t// Target 11\n\tdata_array[10].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_11;\n\tdata_array[10].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_11;\n\tdata_array[10].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_11;\n\tdata_array[10].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_11;\n\tdata_array[10].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_11;\n\tdata_array[10].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_11;\n\tdata_array[10].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_11;\n\tdata_array[10].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_11;\n\tdata_array[10].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_11;\n\tdata_array[10].CAN_TX_DET_ANGLE_ML = 
msg->CAN_TX_DET_ANGLE_ML_11;\n\tdata_array[10].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_11;\n\tdata_array[10].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_11;\n\n\t// Target 12\n\tdata_array[11].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_12;\n\tdata_array[11].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_12;\n\tdata_array[11].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_12;\n\tdata_array[11].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_12;\n\tdata_array[11].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_12;\n\tdata_array[11].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_12;\n\tdata_array[11].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_12;\n\tdata_array[11].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_12;\n\tdata_array[11].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_12;\n\tdata_array[11].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_12;\n\tdata_array[11].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_12;\n\tdata_array[11].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_12;\n\n\t// Target 13\n\tdata_array[12].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_13;\n\tdata_array[12].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_13;\n\tdata_array[12].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_13;\n\tdata_array[12].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_13;\n\tdata_array[12].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_13;\n\tdata_array[12].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_13;\n\tdata_array[12].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_13;\n\tdata_array[12].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_13;\n\tdata_array[12].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_13;\n\tdata_array[12].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_13;\n\tdata_array[12].CAN_TX_DET_RANGE_ML = 
msg->CAN_TX_DET_RANGE_ML_13;\n\tdata_array[12].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_13;\n\n\t// Target 14\n\tdata_array[13].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_14;\n\tdata_array[13].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_14;\n\tdata_array[13].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_14;\n\tdata_array[13].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_14;\n\tdata_array[13].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_14;\n\tdata_array[13].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_14;\n\tdata_array[13].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_14;\n\tdata_array[13].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_14;\n\tdata_array[13].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_14;\n\tdata_array[13].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_14;\n\tdata_array[13].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_14;\n\tdata_array[13].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_14;\n\n\t// Target 15\n\tdata_array[14].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_15;\n\tdata_array[14].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_15;\n\tdata_array[14].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_15;\n\tdata_array[14].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_15;\n\tdata_array[14].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_15;\n\tdata_array[14].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_15;\n\tdata_array[14].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_15;\n\tdata_array[14].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_15;\n\tdata_array[14].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_15;\n\tdata_array[14].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_15;\n\tdata_array[14].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_15;\n\tdata_array[14].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_15;\n\n\t// Target 
16\n\tdata_array[15].CAN_TX_F_VALID_DETECTION_LL = msg->CAN_TX_F_VALID_DETECTION_LL_16;\n\tdata_array[15].CAN_TX_DETECT_STATUS_LL = msg->CAN_TX_DETECT_STATUS_LL_16;\n\tdata_array[15].CAN_TX_DET_AMPLITUTDE_LL = msg->CAN_TX_DET_AMPLITUTDE_LL_16;\n\tdata_array[15].CAN_TX_DET_ANGLE_LL = msg->CAN_TX_DET_ANGLE_LL_16;\n\tdata_array[15].CAN_TX_DET_RANGE_LL = msg->CAN_TX_DET_RANGE_LL_16;\n\tdata_array[15].CAN_TX_DET_RANGE_RATE_LL = msg->CAN_TX_DET_RANGE_RATE_LL_16;\n\tdata_array[15].CAN_TX_F_VALID_DETECTION_ML = msg->CAN_TX_F_VALID_DETECTION_ML_16;\n\tdata_array[15].CAN_TX_DETECT_STATUS_ML = msg->CAN_TX_DETECT_STATUS_ML_16;\n\tdata_array[15].CAN_TX_DET_AMPLITUTDE_ML = msg->CAN_TX_DET_AMPLITUTDE_ML_16;\n\tdata_array[15].CAN_TX_DET_ANGLE_ML = msg->CAN_TX_DET_ANGLE_ML_16;\n\tdata_array[15].CAN_TX_DET_RANGE_ML = msg->CAN_TX_DET_RANGE_ML_16;\n\tdata_array[15].CAN_TX_DET_RANGE_RATE_ML = msg->CAN_TX_DET_RANGE_RATE_ML_16;\n\n\treturn data_array;\n}\n\ndouble minML,maxML,minLL,maxLL;\n\n\nvoid messageHandler(const htf_delphi_esr::Delphi_radarConstPtr msg)\n{\n//\tROS_INFO(\"Radar data received\");\n\tstd::vector<radar_data> data = msg2array(msg);\n\n\tvisualization_msgs::MarkerArray markers;\n\tvisualization_msgs::Marker markerML;\n\tvisualization_msgs::Marker markerLL;\n\n\tmarkerLL.header.frame_id = msg->header.frame_id;\n\tmarkerLL.header.stamp = msg->header.stamp;\n\tmarkerLL.ns = \"my_namespace\";\n\tmarkerLL.mesh_resource = \"package://pr2_description/meshes/base_v0/base.dae\";\n\n\tmarkerLL.type = visualization_msgs::Marker::CYLINDER;\n\tmarkerLL.action = visualization_msgs::Marker::ADD;\n\n\tmarkerLL.pose.orientation.x = 0.0;\n\tmarkerLL.pose.orientation.y = 0.0;\n\tmarkerLL.pose.orientation.z = 0.0;\n\tmarkerLL.pose.orientation.w = 1.0;\n\tmarkerLL.color.a = 1.0; // Don't forget to set the alpha!\n\n\tmarkerML = markerLL;\n\n\tstd::vector<geometry_msgs::Point> detections;\n\n\t// Remove all markers from previous frame\n\tmarkerML.action = 
visualization_msgs::Marker::DELETEALL;\n\tmarkers.markers.push_back(markerML);\n\tmarkerML.action = visualization_msgs::Marker::ADD;\n\n\tfor (int i=0;i<data.size();i++)\n\t{\n\t\tmarkerML.id = i;\n\t\tmarkerLL.id = 16+i;\n\n\t\tdouble rangeML = data[i].CAN_TX_DET_RANGE_ML;\n\t\tdouble angleML = data[i].CAN_TX_DET_ANGLE_ML*PI/180;\n\t\tdouble rangeLL = data[i].CAN_TX_DET_RANGE_LL;\n\t\tdouble angleLL = data[i].CAN_TX_DET_ANGLE_LL*PI/180;\n\n\t\t// Convert from polar to cartesian coordinates (and include potential pitch angle of sensor)\n\t\tdouble maxPlanarDistance = height*tan(PI/2-pitch_angle);\n\t\tdouble planarDistanceML = rangeML*cos(pitch_angle);\n\t\tgeometry_msgs::Point detML;\n\t\tdetML.x = planarDistanceML*cos(angleLL);\n\t\tdetML.y = planarDistanceML*sin(angleLL);\n\t\tdouble planarDistanceLL = rangeLL*cos(pitch_angle);\n\t\tgeometry_msgs::Point detLL;\n\t\tdetLL.x = planarDistanceLL*cos(angleLL);\n\t\tdetLL.y = planarDistanceLL*sin(angleLL);\n\n\n\n\t\tmarkerML.pose.position.x = detML.x;\n\t\tmarkerML.pose.position.y = detML.y;\n\t\tmarkerML.pose.position.z = 0.0;\n\n\t\tmarkerLL.pose.position.x = detLL.x;\n\t\tmarkerLL.pose.position.y = detLL.y;\n\t\tmarkerLL.pose.position.z = 0.0;\n\n\t\tdouble amplitudeML = data[i].CAN_TX_DET_AMPLITUTDE_ML;\n\t\tdouble amplitudeLL = data[i].CAN_TX_DET_AMPLITUTDE_LL;\n\n\t\tif (amplitudeML < minML)\n\t\t\tminML = amplitudeML;\n\t\tif (amplitudeML > maxML)\n\t\t\tmaxML = amplitudeML;\n\t\tif (amplitudeLL < minLL)\n\t\t\tminLL = amplitudeLL;\n\t\tif (amplitudeLL > maxLL)\n\t\t\tmaxLL = amplitudeLL;\n\n\t\tdouble scaleML = 1.0;\n\t\tmarkerML.scale.x = scaleML;\n\t\tmarkerML.scale.y = scaleML;\n\t\tmarkerML.scale.z = scaleML;\n\n\t\tdouble scaleLL = 1.0;\n\t\tmarkerLL.scale.x = scaleLL;\n\t\tmarkerLL.scale.y = scaleLL;\n\t\tmarkerLL.scale.z = scaleLL;\n\n\t\tmarkerML.color.r = 0;\n\t\tmarkerML.color.g = 1;\n\t\tmarkerML.color.b = 0;\n\t\tmarkerML.color.a = (amplitudeML-minML)/(maxML-minML);\n\n\t\tmarkerLL.color.r = 
1;\n\t\tmarkerLL.color.g = 0;\n\t\tmarkerLL.color.b = 0;\n\t\tmarkerLL.color.a = (amplitudeLL-minLL)/(maxLL-minLL);\n\n\t\tif (data[i].CAN_TX_F_VALID_DETECTION_ML && data[i].CAN_TX_DETECT_STATUS_ML)\n\t\t\tdetections.push_back(detML);\n\n\t\tif (data[i].CAN_TX_F_VALID_DETECTION_LL && data[i].CAN_TX_DETECT_STATUS_LL)\n\t\t\tdetections.push_back(detLL);\n\n\t\tmarkers.markers.push_back(markerML);\n\t\tmarkers.markers.push_back(markerLL);\n\t}\n\n\tpubVisualization.publish( markers );\n}\n\nint main(int argc, char **argv)\n{\n\tros::init(argc, argv, \"htf_delphi_esr_node\");\n\tros::NodeHandle n(\"~\");\n\n\tminML = std::numeric_limits<double>::max();\n\tmaxML = -std::numeric_limits<double>::max();\n\tminLL = std::numeric_limits<double>::max();\n\tmaxLL = -std::numeric_limits<double>::max();\n\n\tstd::string topic_delphi_radar,topic_visualization;\n\n\tn.param<std::string>(\"topic_delphi_radar\", topic_delphi_radar, \"/Delphi_ESR/RadarData\");\n\tn.param<std::string>(\"topic_visualization\", topic_visualization, \"/Delphi_ESR/markers\");\n\n\t// Setup\n\tn.param<double>(\"pitch_angle\",pitch_angle,0);\n\tn.param<double>(\"height\",height,0);\n\n\tros::Subscriber subGps_course_speed = n.subscribe<htf_delphi_esr::Delphi_radar>(topic_delphi_radar, 2, messageHandler);\n\tpubVisualization = n.advertise<visualization_msgs::MarkerArray>(topic_visualization,1);\n\n\tros::spin();\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7077625393867493, "alphanum_fraction": 0.7226027250289917, "avg_line_length": 53.8125, "blob_id": "e6f18f65bc90c5e8dda9246bf71afe6c54dd9fc7", "content_id": "9c0d249797e87211490a040706453222d383ac01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 876, "license_type": "no_license", "max_line_length": 155, "num_lines": 16, "path": "/ground_truth/dynamic/readme.md", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "# Usage\nGround truth data for dynamic obstacles is contained 
in *dynamic_ground_truth.txt*.\nIt has the following format:\n\n 1 Track ID. All rows with the same ID belong to the same path.\n 2 x. The x-coordinate (column) of the object in vatic.\n 3 y. The y-coordinate (row) of the object in vatic.\n 4 frame. The vatic frame that this annotation represents.\n 5 timestamp. The corresponding ROS timestamp.\n 6 lost. If 1, the annotation is outside of the view screen.\n 7 occluded. Unused, ignore.\n 8 generated. If 1, the annotation was automatically interpolated.\n 9 label. The label for this annotation, enclosed in quotation marks.\n 10 state. The state of the object if any.\n\nA simple usage example is shown in *demo.m*, where the dynamic obstacles (humans) and the tractor are plotted over time on top of the annotated static map." }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7685714364051819, "avg_line_length": 14.954545021057129, "blob_id": "a3fd04280e7dc478b2936f96d3f94bbb4021bdb2", "content_id": "1f24ed850575c8d762e0b0eddda9540a63605d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 350, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/ros/src/ground_truth/CMakeLists.txt", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(ground_truth)\n\nfind_package(catkin REQUIRED COMPONENTS\n rospy\n std_msgs\n visualization_msgs\n)\n\ncatkin_package(\n CATKIN_DEPENDS\n)\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n)\n\ninstall(PROGRAMS\n src/dynamic_visualizer.py\n src/static_visualizer.py\n DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}\n)" }, { "alpha_fraction": 0.601345419883728, "alphanum_fraction": 0.6063253283500671, "avg_line_length": 44.601593017578125, "blob_id": "1a681bbefacc45e3a0fef07281d6464b3ef3499c", "content_id": "cd60a9f3972d06a1dc58747d5c13aa4a43870cb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 11446, "license_type": "no_license", "max_line_length": 135, "num_lines": 251, "path": "/ros/src/gps/nmea_navsat_driver/src/libnmea_navsat_driver/driver.py", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2013, Eric Perko\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the names of the authors nor the names of their\n# affiliated organizations may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport math\n\nimport rospy\n\nfrom sensor_msgs.msg import NavSatFix, NavSatStatus, TimeReference\nfrom geometry_msgs.msg import TwistStamped\nfrom htf_safe_msgs.msg import Course_Speed\nfrom htf_safe_msgs.msg import GPHDT\n\nfrom libnmea_navsat_driver.checksum_utils import check_nmea_checksum\nimport libnmea_navsat_driver.parser\n\n\nclass RosNMEADriver(object):\n def __init__(self):\n self.fix_pub = rospy.Publisher('fix', NavSatFix, queue_size=1)\n self.vtg_pub = rospy.Publisher('vtg', Course_Speed, queue_size=1)\n self.hdt_pub = rospy.Publisher('hdt', GPHDT, queue_size=1)\n self.vel_pub = rospy.Publisher('vel', TwistStamped, queue_size=1)\n self.time_ref_pub = rospy.Publisher('time_reference', TimeReference, queue_size=1)\n\n self.time_ref_source = rospy.get_param('~time_ref_source', None)\n self.use_RMC = rospy.get_param('~useRMC', False)\n self.use_GPS_time = rospy.get_param('~useGPStime', False)\n self.time_delay = rospy.get_param('~time_delay',0.0)\n self.time_delay_heading = rospy.get_param('~time_delay_heading',0.0)\n self.gps_time = None\n\n\tself.gps_covariance_pos = rospy.get_param('~gps_covariance_pos', [0.0, 0.0, 1.0])\n\n # Returns True if we successfully did something with the passed in\n # nmea_string\n def add_sentence(self, nmea_string, frame_id, timestamp=None):\n if not check_nmea_checksum(nmea_string):\n rospy.logwarn(\"Received a sentence with an invalid checksum. 
\" +\n \"Sentence was: %s\" % repr(nmea_string))\n return False\n parsed_sentence = libnmea_navsat_driver.parser.parse_nmea_sentence(nmea_string)\n if not parsed_sentence:\n rospy.logdebug(\"Failed to parse NMEA sentence. Sentece was: %s\" % nmea_string)\n return False\n\n if timestamp:\n current_time = timestamp\n else:\n current_time = rospy.get_rostime()\n current_fix = NavSatFix()\n current_fix.header.stamp = current_time\n current_fix.header.frame_id = frame_id\n current_time_ref = TimeReference()\n current_time_ref.header.stamp = current_time\n current_time_ref.header.frame_id = frame_id\n if self.time_ref_source:\n current_time_ref.source = self.time_ref_source\n else:\n current_time_ref.source = frame_id\n \n if not self.use_RMC and 'GGA' in parsed_sentence:\n #rospy.loginfo('GGA')\n data = parsed_sentence['GGA']\n gps_qual = data['fix_type']\n if gps_qual == 0:\n current_fix.status.status = NavSatStatus.STATUS_NO_FIX\n elif gps_qual == 1:\n current_fix.status.status = NavSatStatus.STATUS_FIX\n elif gps_qual == 2:\n current_fix.status.status = NavSatStatus.STATUS_SBAS_FIX\n elif gps_qual in (4, 5):\n current_fix.status.status = NavSatStatus.STATUS_GBAS_FIX\n else:\n current_fix.status.status = NavSatStatus.STATUS_NO_FIX\n\n current_fix.status.service = NavSatStatus.SERVICE_GPS\n\n current_fix.header.stamp = current_time\n\n latitude = data['latitude']\n if data['latitude_direction'] == 'S':\n latitude = -latitude\n current_fix.latitude = latitude\n\n longitude = data['longitude']\n if data['longitude_direction'] == 'W':\n longitude = -longitude\n current_fix.longitude = longitude\n\n #hdop = data['hdop']\n #current_fix.position_covariance[0] = hdop ** 2\n #current_fix.position_covariance[4] = hdop ** 2\n #current_fix.position_covariance[8] = (2 * hdop) ** 2 # FIXME\n #current_fix.position_covariance_type = \\\n # NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n \n # covariances diagonals as rospy arguments\n current_fix.position_covariance[0] = 
self.gps_covariance_pos[0]\n current_fix.position_covariance[4] = self.gps_covariance_pos[1]\n current_fix.position_covariance[8] = self.gps_covariance_pos[2]\n current_fix.position_covariance_type =\\\n NavSatFix.COVARIANCE_TYPE_DIAGONAL_KNOWN\n\n # Altitude is above ellipsoid, so adjust for mean-sea-level\n altitude = data['altitude'] + data['mean_sea_level']\n current_fix.altitude = altitude\n\n if not math.isnan(data['utc_time']):\n current_time_ref.time_ref = rospy.Time.from_sec(int(int(timestamp.to_sec())/(60*60))*(60*60)+data['utc_time'])\n self.time_ref_pub.publish(current_time_ref)\n\t\tif self.use_GPS_time:\n self.gps_time = current_time_ref.time_ref\n current_fix.header.stamp = current_time_ref.time_ref\n current_fix.header.stamp = current_fix.header.stamp+rospy.Duration(self.time_delay)\n #print(timestamp.to_sec())\n self.fix_pub.publish(current_fix)\n\n #if not math.isnan(data['utc_time']):\n # current_time_ref.time_ref = rospy.Time.from_sec(data['utc_time'])\n # self.time_ref_pub.publish(current_time_ref)\n\n elif 'RMC' in parsed_sentence:\n data = parsed_sentence['RMC']\n #rospy.loginfo('RMC')\n # Only publish a fix from RMC if the use_RMC flag is set.\n if self.use_RMC:\n if data['fix_valid']:\n current_fix.status.status = NavSatStatus.STATUS_FIX\n else:\n current_fix.status.status = NavSatStatus.STATUS_NO_FIX\n\n current_fix.status.service = NavSatStatus.SERVICE_GPS\n\n latitude = data['latitude']\n if data['latitude_direction'] == 'S':\n latitude = -latitude\n current_fix.latitude = latitude\n\n longitude = data['longitude']\n if data['longitude_direction'] == 'W':\n longitude = -longitude\n current_fix.longitude = longitude\n\n current_fix.altitude = float('NaN')\n current_fix.position_covariance_type = \\\n NavSatFix.COVARIANCE_TYPE_UNKNOWN\n\n self.fix_pub.publish(current_fix)\n\n if not math.isnan(data['utc_time']):\n current_time_ref.time_ref = rospy.Time.from_sec(data['utc_time'])\n self.time_ref_pub.publish(current_time_ref)\n\n # Publish 
velocity from RMC regardless, since GGA doesn't provide it.\n if data['fix_valid']:\n current_vel = TwistStamped()\n current_vel.header.stamp = current_time\n current_vel.header.frame_id = frame_id\n current_vel.twist.linear.x = data['speed'] * \\\n math.sin(data['true_course'])\n current_vel.twist.linear.y = data['speed'] * \\\n math.cos(data['true_course'])\n self.vel_pub.publish(current_vel)\n elif 'VTG' in parsed_sentence:\n data = parsed_sentence['VTG']\n track_made_good_degrees_true = data['track_made_good_degrees_true']\n track_made_good_degrees_magnetic = data['track_made_good_degrees_magnetic']\n speed = data['speed']\n SPEED_OVER_GROUND = data['speed_over_ground']\n if not math.isnan(track_made_good_degrees_true):\n DIRECTION_REFERENCE = \"True\"\n COURSE_OVER_GROUND = track_made_good_degrees_true\n elif not math.isnan(track_made_good_degrees_magnetic):\n DIRECTION_REFERENCE = \"Magnetic\"\n COURSE_OVER_GROUND = track_made_good_degrees_magnetic\n else:\n DIRECTION_REFERENCE = \"Null\"\n\t\tCOURSE_OVER_GROUND = float(0)\n\t\tSPEED_OVER_GROUND = float('NaN')\n current_vtg = Course_Speed()\n current_vtg.header.stamp = current_time\n current_vtg.header.frame_id = frame_id\n current_vtg.DIRECTION_REFERENCE = DIRECTION_REFERENCE\n current_vtg.COURSE_OVER_GROUND = COURSE_OVER_GROUND \n current_vtg.SPEED_OVER_GROUND = SPEED_OVER_GROUND\n self.vtg_pub.publish(current_vtg)\n #rospy.loginfo(track_made_good_degrees_magnetic)\n elif 'HDT' in parsed_sentence:\n #rospy.loginfo('HDT') \n data = parsed_sentence['HDT']\n heading_degrees = data['heading_degrees']\n current_hdt = GPHDT()\n current_hdt.header.stamp = current_time\n current_hdt.header.frame_id = frame_id\n current_hdt.HEADING_DEGREES = heading_degrees\n if self.use_GPS_time and not self.gps_time==None:\n current_hdt.header.stamp = self.gps_time-rospy.Duration(0,1000000)\n elif self.use_GPS_time:\n current_hdt.header.stamp.secs = 0\n current_hdt.header.stamp = 
current_hdt.header.stamp+rospy.Duration(self.time_delay)+rospy.Duration(self.time_delay_heading)\n #print(current_hdt.header.stamp.to_sec())\n self.hdt_pub.publish(current_hdt)\n else:\n rospy.loginfo('Not valid')\n return False\n\n \"\"\"Helper method for getting the frame_id with the correct TF prefix\"\"\"\n\n @staticmethod\n def get_frame_id():\n frame_id = rospy.get_param('~frame_id', 'gps')\n if frame_id[0] != \"/\":\n \"\"\"Add the TF prefix\"\"\"\n prefix = \"\"\n prefix_param = rospy.search_param('tf_prefix')\n if prefix_param:\n prefix = rospy.get_param(prefix_param)\n if prefix[0] != \"/\":\n prefix = \"/%s\" % prefix\n return \"%s/%s\" % (prefix, frame_id)\n else:\n return frame_id\n" }, { "alpha_fraction": 0.35277777910232544, "alphanum_fraction": 0.4555555582046509, "avg_line_length": 21.5, "blob_id": "8ebd4dd7931675a759569635085822c32a153b25", "content_id": "839a372c472204e95ae20ad8ec1bcf0140ce5df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 360, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/ros/src/gps/nmea_msgs/CHANGELOG.rst", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nChange log for nmea_msgs package\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n1.0.0 (2015-04-23)\n------------------\n* Release into Jade.\n\n0.1.1 (2015-02-15)\n------------------\n* Cleanup CMakeLists.txt and package.xml\n\n0.1.0 (2013-07-21)\n------------------\n* Initial version (released into Hydro)\n* Supports NMEA0183 sentences\n" }, { "alpha_fraction": 0.696789562702179, "alphanum_fraction": 0.7360285520553589, "avg_line_length": 37.81538391113281, "blob_id": "eaffb23bc9e863df50ec0c8a4d386747c94028b7", "content_id": "d7b41ff1c2c43da3bf4956f0d18bfe1125bf2952", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2523, "license_type": "no_license", 
"max_line_length": 158, "num_lines": 65, "path": "/README.md", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "# FieldSAFE - Dataset for Obstacle Detection in Agriculture\n\n[![Watch the video](https://vision.eng.au.dk/wp-content/uploads/2020/09/fieldsafe_youtube.png)](https://www.youtube.com/watch?v=YXz1zdaFX0E)\n\nThis repository contains the necessary software for utilizing the FieldSAFE dataset.\nSoftware and example usage scripts are available in the folder \"ros\".\nFurther, ground truth GPS annotations for all static and dynamic obstacles are contained in the folder \"ground_truth\".\n\nFor more information, visit the FieldSAFE website: [https://vision.eng.au.dk/fieldsafe/](https://vision.eng.au.dk/fieldsafe/)\n\n## Citation\nIf you use this dataset in your research or elsewhere, please cite/reference the following paper:\n\n[FieldSAFE: Dataset for Obstacle Detection in Agriculture](https://arxiv.org/abs/1709.03526)\n\n```sh\n@article{kragh2017fieldsafe,\n title={FieldSAFE: Dataset for Obstacle Detection in Agriculture},\n author={Kragh, Mikkel Fly and Christiansen, Peter and Laursen, Morten Stigaard and Larsen, Morten and Steen, Kim Arild and Green, Ole and Karstoft, Henrik and J{\\o}rgensen, Rasmus Nyholm},\n journal={arXiv preprint arXiv:1709.03526},\n year={2017}\n}\n```\n\n## Installation Instructions\nThe FieldSAFE dataset and software has been tested with Ubuntu 16.04 and ROS Kinetic, but may work with other Linux distributions and newer ROS distributions.\nBelow, installations instructions for all necessary dependencies are given.\n\n* Install ROS Kinetic on Ubuntu 16.04 (Desktop-Full Install)\n\n http://wiki.ros.org/kinetic/Installation/Ubuntu\n\n* Install the following additional packages:\n ```sh\n sudo apt-get install ros-kinetic-robot-localization \n sudo apt-get install ros-kinetic-geographic-msgs\n sudo apt-get install libpcap-dev\n ```\n* Clone and build this repository\n ```sh\n git clone 
https://github.com/mikkelkh/FieldSAFE\n cd FieldSAFE\n git submodule update --init --recursive\n cd ros\n catkin_make\n ```\n* Environment Setup\n ```sh\n source devel/setup.bash\n ```\n* Download a 1 minute example bag with sensor data: \n\n [2016-10-25-11-41-21_example.bag](https://vision.eng.au.dk/data/FieldSAFE/2016-10-25-11-41-21_example.bag)\n\n* Run the original demo\n ```sh\n roslaunch demo demo.launch file:=/path/to/2016-10-25-11-41-21_example.bag\n ```\n or this updated demo by [@tambetm](https://github.com/tambetm) including visualization of ground truth obstacles:\n ```sh\n roslaunch demo demo_markers.launch file:=/path/to/2016-10-25-11-41-21_example.bag\n ```\n* Download more data from: \n\n [https://vision.eng.au.dk/fieldsafe/](https://vision.eng.au.dk/fieldsafe/)\n" }, { "alpha_fraction": 0.5797434449195862, "alphanum_fraction": 0.6118150353431702, "avg_line_length": 33.03773498535156, "blob_id": "7b8fc84e3bf5ad2840d5f6ff8a49386e9c17e8f2", "content_id": "a119855e82312ae07f6a642ae50a247b6b3aea0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12628, "license_type": "no_license", "max_line_length": 197, "num_lines": 371, "path": "/ros/src/gps/htf_rtk_gps/src/rtk_gps.cpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "/****************************************************************************\n# Copyright (c) 2011-2013, author Dennis Tryk\n# [email protected]\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the 
distribution.\n# * Neither the name FroboMind nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************/\n\n\n#include \"rtk_gps.hpp\"\n#include <boost/algorithm/string/replace.hpp>\n#include <boost/range/as_array.hpp>\n/**************************************************************\n * Constructor\n**************************************************************/\nRtkGps::RtkGps() : pub_freq(\"topic1\", updater, diagnostic_updater::FrequencyStatusParam(&min_freq, &max_freq, 0.1, 10))\n{\n\tmin_freq = 1;\n\tmax_freq = 10;\n}\n\n/**************************************************************\n * Descructor\n **************************************************************/\nRtkGps::~RtkGps()\n{\n}\n\n/**************************************************************\n * Diagnostic updater needs at least an empty method to call, \n * when running.\n **************************************************************/\nvoid RtkGps::diagnostics(diagnostic_updater::DiagnosticStatusWrapper 
&stat)\n{\n}\n\n/**************************************************************\n * Detect CAN identifier and call the right parser functions\n **************************************************************/\nvoid RtkGps::getGPSData_CAN(const htf_safe_msgs::can::ConstPtr& rx_msg)\n{\n\tint i;\n\tlen = rx_msg->length;\n\tif(len = 8)\n\t{\n\t\tswitch(rx_msg->id)\n\t\t{\n\t\t\tcase 0x19F8021D:\n\t\t\t\tparseCourseSpeedData(rx_msg);\n\t\t\t\tbreak;\n\t\t\tcase 0x19F8051D:\n\t\t\t\tStoreGNSSPositionData(rx_msg);\n\t\t\t\tbreak;\n\t\t}\n\t}\n}\n\n/**************************************************************\n * Detect serial identifier and call the right parser functions\n **************************************************************/\nvoid RtkGps::getGPSData_Serial(const htf_safe_msgs::serial::ConstPtr& rx_msg)\n{\n\tserialSentence.header.stamp = rx_msg->header.stamp;\n\tserialSentence.header.frame_id = frame_id_gps;\n\n\tstring sentenceOld = rx_msg->data;\n\t//sentenceOld.erase(sentenceOld.size()-1);\n\t//sentenceOld.erase(0,1);\n\tboost::replace_all(sentenceOld, \"\\n\",\"\");\n\tserialSentence.sentence = sentenceOld;// sentenceOld.substr(0,sentenceOld.length()-1);\n\trtk_gps_pub_serialSentence.publish(serialSentence);\n\n/*\n\tboost::char_separator<char> sep(\"$*,\");\n\ttokenizer tokens(rx_msg->data, sep);\n\ttokenizer::iterator tok_iter;\n\n\n\tROS_ERROR(\"INPUT MESSAGE: %s\",rx_msg->data.c_str());\n\t\n\tif(tokens.begin() != tokens.end())\n\t{\n\t\ttok_iter = tokens.begin();\n\n\t\t//ROS_ERROR(\"%s\",(*tok_iter).c_str());\n\n\t\t// Check for identifier GPGGA\n\t\tif((*tok_iter).compare(\"GPGGA\") == 0)\n\t\t{\n\t\t\tprocessGPS_Serial_GPGGA(tokens,rx_msg->data);\n\t\t}\n\n\t\t// Check for identifier GPVTG\n\t\telse if((*tok_iter).compare(\"GPVTG\") == 0){\n\t\t\tprocessGPS_Serial_GPVTG(tokens,rx_msg->data);\n\t\t}\n\t\telse{\n\t\t\tROS_INFO(\"Ignoring Unknown NMEA identifier %s\", 
(*tok_iter).c_str());\n\t\t}\n\t}*/\n\n}\n\n/**************************************************************\n * Parse GPS string of type GPGGA \n **************************************************************/\n/*void RtkGps::processGPS_Serial_GPGGA(tokenizer& tokens,std::string raw){\n\t//GPGGA,112112.70,5629.37803512,N,00935.20192341,E,4,08,1.2,58.938,M,43.002,M,1.7,0026*73\n\t//GPGGA,hhmmss.ss,llll.ll ,a,yyyyy.yy ,a,x,xx,x.x, x.x ,M,x.x ,M,x.x,xxxx*hh\n\n\n//uint8 SECONDS\n//uint8 MINUTES\n//uint8 HOURS\n//uint8 DAY\n//uint8 MONTH\n//uint16 YEAR\n//\t\tfloat64 Latitude\n//\t\tfloat64 Longitude\n//\t\tfloat64 Altitude\n//uint8 TypeOfSystem\n//uint8 GNSSMethod\n//uint8 GNSSIntegrity\n//\t\tfloat64 HDOP (horisontal delution of position)\n//float64 PDOP\n//\t\tuint8 NumberOfSatelites\n//uint8 NumberOfRefStations\n\n\ttokenizer::iterator tok_iter = tokens.begin();\n\n\t// skip identifier\n\ttok_iter++;\n\n\tCourseSpeedMsg.header.stamp = ros::Time::now();\n\tCourseSpeedMsg.header.frame_id = frame_id_coursespeed;\n\t// 1) hhmmss.ss = UTC of position \n\tdouble clock = atof((*tok_iter++).c_str()); \n \t//GNSSPositionDataMsg.YEAR = ptm->tm_year +1900;\n \t//GNSSPositionDataMsg.MONTH = ptm->tm_mon;\n \t//GNSSPositionDataMsg.DAY = ptm->tm_mday;\n \t//GNSSPositionDataMsg.HOURS = uint8(clock/10000);\n \t//GNSSPositionDataMsg.MINUTES = ((clock>>2) & 0x03);\n \t//GNSSPositionDataMsg.SECONDS = (clock & 0x03);\n\n\t// 2) llll.ll = latitude of position\n\tGNSSPositionDataMsg.Latitude = atof((*tok_iter++).c_str()); \n\n\t// 3) a = N or S\n\ttok_iter++; \n\n\t// 4) yyyyy.yy = Longitude of position\n\tGNSSPositionDataMsg.Longitude = atof((*tok_iter++).c_str());\n\n\t// 5) a = E or W\n\ttok_iter++; \n\n\t// 6) x = GPS quality indicator (0=invalid; 1=GPS fix; 2=Diff. 
GPS fix)\n\ttok_iter++; \n\n\t// 7) xx = Number of satellites in use [not those in view] xx = number of satellites in use \n\tGNSSPositionDataMsg.NumberOfSatelites = atoi((*tok_iter++).c_str());\n\t\n\t// 8) x.x = Horizontal dilution of position\n\tGNSSPositionDataMsg.HDOP = atof((*tok_iter++).c_str());\n\n\t// 9) x.x = Antenna altitude above/below mean sea level (geoid)\n\tGNSSPositionDataMsg.Altitude = atof((*tok_iter++).c_str());\n\n\t// 10) M = Meters (Antenna height unit)\n\ttok_iter++; \n\n\t// 11) x.x = Geoidal separation (Diff. between WGS-84 earth ellipsoid and mean sea level. -=geoid is below WGS-84 ellipsoid)\n\ttok_iter++; \n\n\t// 12) M = Meters (Units of geoidal separation)\n\ttok_iter++; \n\n\t// 13) x.x = Age in seconds since last update from diff. reference station\n\ttok_iter++; \n\n\t// 14) xxxx = Differential reference station ID\n\ttok_iter++; \n\n\n\n\tROS_ERROR(\"Hour: %i, Min: %i, Sec: %i\", GNSSPositionDataMsg.HOURS, GNSSPositionDataMsg.MINUTES,GNSSPositionDataMsg.SECONDS);\n\n}/*\n\n/**************************************************************\n * Parse GPS string of type GPVTG \n **************************************************************/\n/*void RtkGps::processGPS_Serial_GPVTG(tokenizer& tokens,std::string raw){\n\ttokenizer::iterator tok_iter = tokens.begin();\n\t// skip identifier\n\ttok_iter++;\n\n\t// Indicates that the direction is relative to true north.\n\tCourseSpeedMsg.DIRECTION_REFERENCE = \"True\";\n\n\t// GPVTG,33.9,T,,,000.01,N,000.03,K,R*68\n\tCourseSpeedMsg.header.stamp = ros::Time::now();\n\tCourseSpeedMsg.header.frame_id = frame_id_coursespeed;\n\tGNSSPositionDataMsg.header.stamp = ros::Time::now();\n\tGNSSPositionDataMsg.header.frame_id =frame_id_pos;\n\n\t// Converts string to float. \n\tCourseSpeedMsg.COURSE_OVER_GROUND = atof((*tok_iter++).c_str());\n\n\ttok_iter++;\n\ttok_iter++;\n\ttok_iter++;\n\n\t// Converts string to float. 
\n\tCourseSpeedMsg.SPEED_OVER_GROUND = atof((*tok_iter++).c_str());\n\t//ROS_ERROR(\"msg: %s\",raw.c_str());\n\tROS_ERROR(\"DIRECTION_REFERENCE: %s, COURSE_OVER_GROUND: %f, SPEED_OVER_GROUND: %f\", CourseSpeedMsg.DIRECTION_REFERENCE.c_str(), CourseSpeedMsg.COURSE_OVER_GROUND,CourseSpeedMsg.SPEED_OVER_GROUND);\n\t// Publish courseSpeed\n\trtk_gps_pub_course_speed.publish(CourseSpeedMsg);\n\t//pub_freq.tick();\n\n}*/\n/**************************************************************\n * Parse data for Course and Speed message with CAN identifier\n * 0x19F8021D and publish it.\n **************************************************************/\nvoid RtkGps::parseCourseSpeedData(const htf_safe_msgs::can::ConstPtr& rx_msg)\n{\n\tCourseSpeedMsg.header.stamp = ros::Time::now();\n\tCourseSpeedMsg.header.frame_id = frame_id_gps;\n\tGNSSPositionDataMsg.header.stamp = ros::Time::now();\n\tGNSSPositionDataMsg.header.frame_id =frame_id_gps;\n\n\tswitch((rx_msg->data[0] & 0xC0) >> 6)\n\t{\n\t\tcase 0:\n\t\t\tCourseSpeedMsg.DIRECTION_REFERENCE = \"True\";\n\t\t\tbreak;\n\t\tcase 1:\n\t\t\tCourseSpeedMsg.DIRECTION_REFERENCE = \"Magnetic\";\n\t\t\tbreak;\n\t\tcase 2:\n\t\t\tCourseSpeedMsg.DIRECTION_REFERENCE = \"Error\";\n\t\t\tbreak;\n\t\tcase 3:\n\t\t\tCourseSpeedMsg.DIRECTION_REFERENCE = \"Null\";\n\t\t\tbreak;\n\t}\n\n\tint16_t value = ((rx_msg->data[3] << 8) | rx_msg->data[2]);\n\tCourseSpeedMsg.COURSE_OVER_GROUND = boost::lexical_cast<float>(value) * 0.0001;\n\tvalue = (rx_msg->data[5] + rx_msg->data[4]);\n\tCourseSpeedMsg.SPEED_OVER_GROUND = boost::lexical_cast<float>(value) * 0.01;\n\n\trtk_gps_pub_course_speed.publish(CourseSpeedMsg);\n\tpub_freq.tick();\n}\n\n/**************************************************************\n * Store data from the 6 different messages with CAN \n * identifier 0x19F8021D, based on the Frame_PosDat byte [0]\n * If message nr 6 is recieved, calle the parseGNSSPositonData\n * function.\n 
**************************************************************/\nvoid RtkGps::StoreGNSSPositionData(const htf_safe_msgs::can::ConstPtr& rx_msg)\n{\n\tint i;\n\tswitch(rx_msg->data[0] & 0x0F)\n\t{\n\t\tcase 0:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame0[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 1:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame1[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 2:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame2[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 3:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame3[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 4:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame4[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 5:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame5[i] = rx_msg->data[i];\n\t\t\tbreak;\n\t\tcase 6:\n\t\t\tfor(i = 0; i < 8; i++ )\n\t\t\t\tGNSSFrame6[i] = rx_msg->data[i];\n\t\t\tparseGNSSPositonData(rx_msg);\n\t\t\tbreak;\t\n\t}\t\t\n\n}\n\n/**************************************************************\n * Parse data from the 6 messages with CAN identifier 0x19F8021D\n * and publish it.\n **************************************************************/\nvoid RtkGps::parseGNSSPositonData(const htf_safe_msgs::can::ConstPtr& rx_msg)\n{\n\tCourseSpeedMsg.header.stamp = ros::Time::now();\n\tCourseSpeedMsg.header.frame_id = frame_id_gps;\n\n\tcout.precision(16);\n\n\tuint32_t postime = (GNSSFrame1[1] << 24) | (GNSSFrame0[7] << 16) | (GNSSFrame0[6] << 8) | GNSSFrame0[5];\n\ttime_t seconds = ((time_t)((GNSSFrame0[4] << 8) | GNSSFrame0[3])*86400) + ((postime) * 0.0001); \n\n\tstruct tm * ptm;\n \tptm = gmtime ( &seconds );\n\n \tGNSSPositionDataMsg.YEAR = ptm->tm_year +1900;\n \tGNSSPositionDataMsg.MONTH = ptm->tm_mon;\n \tGNSSPositionDataMsg.DAY = ptm->tm_mday;\n \tGNSSPositionDataMsg.HOURS = ptm->tm_hour;\n \tGNSSPositionDataMsg.MINUTES = ptm->tm_min;\n \tGNSSPositionDataMsg.SECONDS = ptm->tm_sec;\n\n\n\tuint32_t lat = (GNSSFrame2[2] << 24) | (GNSSFrame2[1] << 16) | 
(GNSSFrame1[7] << 8) | GNSSFrame1[6];\n\tGNSSPositionDataMsg.Latitude = boost::lexical_cast<double>(lat) * 0.000000429497;\n\n\tuint32_t longitude = (GNSSFrame3[3] << 24) | (GNSSFrame3[2] << 16) | (GNSSFrame3[1] << 8) | GNSSFrame2[7];\n\tGNSSPositionDataMsg.Longitude = boost::lexical_cast<double>(longitude) * 0.000000429497;\n\t\n\tuint32_t altitude = (GNSSFrame3[7] << 24) | (GNSSFrame3[6] << 16) | (GNSSFrame3[5] << 8) | GNSSFrame3[4];\n\tGNSSPositionDataMsg.Altitude = boost::lexical_cast<double>(altitude) * 0.000001;\n\n\tGNSSPositionDataMsg.TypeOfSystem = GNSSFrame4[5] & 0x0F;\n\tGNSSPositionDataMsg.GNSSMethod = (GNSSFrame4[5] & 0xF0) >> 4;\n\tGNSSPositionDataMsg.GNSSIntegrity = (GNSSFrame4[6] & 0xC0) >> 4;\n\n\tuint16_t HDOP = (GNSSFrame5[2] << 8) | GNSSFrame5[1];\n\tGNSSPositionDataMsg.HDOP = boost::lexical_cast<double>(HDOP) * 0.01;\n\n\tuint16_t PDOP = (GNSSFrame5[4] << 8) | GNSSFrame5[3];\n\tGNSSPositionDataMsg.PDOP = boost::lexical_cast<double>(PDOP) * 0.01;\n\n\tGNSSPositionDataMsg.NumberOfSatelites = GNSSFrame4[7];\n\tGNSSPositionDataMsg.NumberOfRefStations = GNSSFrame6[2];\n\t\n\trtk_gps_pub_GNSSPositionData.publish(GNSSPositionDataMsg);\n}\n" }, { "alpha_fraction": 0.755033552646637, "alphanum_fraction": 0.7651006579399109, "avg_line_length": 15.5, "blob_id": "243229fa93c55ee0647b1e0e0005c0b35dea56f9", "content_id": "f00949e4720fa84b1212b0f0aa355c21846b4f46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 298, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/ros/src/camera_info_publisher/CMakeLists.txt", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(camera_info_publisher)\n\nfind_package(catkin REQUIRED COMPONENTS\n rospy\n std_msgs\n)\n\ncatkin_package()\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n)\n\ninstall(PROGRAMS\n src/camera_info_publisher.py\n DESTINATION 
${CATKIN_PACKAGE_BIN_DESTINATION}\n)\n\n" }, { "alpha_fraction": 0.7042328119277954, "alphanum_fraction": 0.7201058268547058, "avg_line_length": 35.11465072631836, "blob_id": "56f2f7e07715c3e7af4273329c80cf2ce628fff3", "content_id": "39aa2614cd2f5a32a83a5d2c772104f20c9f37be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5670, "license_type": "no_license", "max_line_length": 127, "num_lines": 157, "path": "/ros/src/gps/gps_to_odometry/src/gps_hdt_to_odometry_node.cpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "/****************************************************************************\n# Copyright (c) 2016, author Mikkel Kragh Hansen\n# [email protected]\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name FroboMind nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n ****************************************************************************/\n\n\n#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include <sstream>\n\n//#include \"htf_safe_msgs/Course_Speed.h\"\n//#include \"htf_rtk_gps/GNSSPositionData.h\"\n#include \"htf_safe_msgs/GPHDT.h\"\n\n#include <nav_msgs/Odometry.h>\n#include <geometry_msgs/Pose.h>\n#include <geometry_msgs/PoseWithCovariance.h>\n#include <geometry_msgs/Point.h>\n#include <geometry_msgs/Twist.h>\n#include <geometry_msgs/Quaternion.h>\n#include <sensor_msgs/Imu.h>\n#include <tf/tf.h>\n\n#define PI 3.14159265359f\n#define DEGREE_TO_RADIANS 2*PI/360\n\nros::Publisher pubOdometry, pubIMU;\n\nstd::string parent_frame, child_frame;\ndouble covariance_yaw;\n\nvoid gps_GPHDT_handler(const htf_safe_msgs::GPHDTConstPtr msg_hdt)\n{\n\t// Orientation\n\tconst double roll = 0.0, pitch = 0.0;\n\tconst double yaw = 2*PI-msg_hdt->HEADING_DEGREES*DEGREE_TO_RADIANS-PI/2;\n\n\ttf::Quaternion q;\n\tq.setRPY(roll, pitch, yaw);\n\tgeometry_msgs::Quaternion orientation;\n\ttf::quaternionTFToMsg(q, orientation);\n\n\t// Position\n\tgeometry_msgs::Point position;\n\tposition.x = 0;\n\tposition.y = 0;\n\tposition.z = 0; // Not used for 2D odometry\n\t\n\tgeometry_msgs::Twist twist;\n\t//twist.linear.x = msg_gps_course_speed->SPEED_OVER_GROUND;\n\n\t// Pose\n\tgeometry_msgs::Pose pose;\n\tpose.position = position;\n\tpose.orientation = orientation;\n\n\tgeometry_msgs::PoseWithCovariance 
poseWithCovariance;\n\tposeWithCovariance.pose = pose;\n\tgeometry_msgs::TwistWithCovariance twistWithCovariance;\n\ttwistWithCovariance.twist = twist;\n//\tfloat covariance[36];\n\tboost::array<float, 36> covariance;\n\tstd::fill( covariance.begin(), covariance.begin() + covariance.size(), 0 );\n\tfor (int i=0;i<6;i++)\n\t\tcovariance[i+i*6]=1.0f;\n\n\tposeWithCovariance.covariance = covariance;\n\ttwistWithCovariance.covariance = covariance;\n\n\tnav_msgs::Odometry msg_odometry;\n\tmsg_odometry.header = msg_hdt->header;\n\tmsg_odometry.header.frame_id = parent_frame;\n\tmsg_odometry.child_frame_id = child_frame;\n\tmsg_odometry.pose = poseWithCovariance;\n\tmsg_odometry.twist = twistWithCovariance;\n\n\tpubOdometry.publish(msg_odometry);\n\n\t// IMU\n\tboost::array<float, 9> orientation_covariance;\n\tstd::fill( orientation_covariance.begin(), orientation_covariance.begin() + orientation_covariance.size(), 0 );\n\torientation_covariance.at(0 + 0 * 3) = 1000.0f;\n\torientation_covariance.at(1 + 1 * 3) = 1000.0f;\n\torientation_covariance.at(2 + 2 * 3) = covariance_yaw;\n\t//\tfor (int i=0;i<3;i++)\n//\t\torientation_covariance.at(i + i * 3) = 1.0f;\n\tgeometry_msgs::Vector3 angular_velocity;\n\tangular_velocity.x = 0;\n\tangular_velocity.y = 0;\n\tangular_velocity.z = 0;\n\tboost::array<float, 9> angular_velocity_covariance;\n\tstd::fill( angular_velocity_covariance.begin(), angular_velocity_covariance.begin() + angular_velocity_covariance.size(), 0 );\n\tfor (int i=0;i<3;i++)\n\t\tangular_velocity_covariance.at(i + i * 3) = 1000.0f;\n\tgeometry_msgs::Vector3 linear_acceleration;\n\tlinear_acceleration.x = 0; // cos(yaw);\n\tlinear_acceleration.y = 0; //-sin(yaw);\n\tlinear_acceleration.z = 0;\n\n\tsensor_msgs::Imu msg_imu;\n\tmsg_imu.header = msg_hdt->header;\n\tmsg_imu.header.frame_id = \"RTK_GPS_HEADING\";\n\tmsg_imu.orientation = orientation;\n\tmsg_imu.orientation_covariance = orientation_covariance;\n\tmsg_imu.angular_velocity = 
angular_velocity;\n\tmsg_imu.angular_velocity_covariance = angular_velocity_covariance;\n\tmsg_imu.linear_acceleration = linear_acceleration;\n\n\n\tpubIMU.publish(msg_imu);\n}\n\nint main(int argc, char **argv)\n{\n\tros::init(argc, argv, \"htf_rtk_gps_to_odometry_node\");\n\tros::NodeHandle n;\n\tros::NodeHandle private_n(\"~\");\n\n\n\tROS_INFO(\"Started htf_rtk_gps_to_odometry_node\");\n\n\tprivate_n.param<std::string>(\"parent_frame\", parent_frame, \"odom\");\n\tprivate_n.param<std::string>(\"child_frame\", child_frame, \"RTK_GPS_HEADING\");\n\tprivate_n.param<double>(\"covariance_yaw\", covariance_yaw, 0.0f);\n\n\tros::Subscriber sub_gps_GPHDT = n.subscribe<htf_safe_msgs::GPHDT>(\"gps/GPHDT\", 1, gps_GPHDT_handler);\n\tpubOdometry = n.advertise<nav_msgs::Odometry>(\"gps/odometry\", 1);\n\tpubIMU = n.advertise<sensor_msgs::Imu>(\"gps/imu\", 1);\n\n\tros::spin();\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5148905515670776, "alphanum_fraction": 0.5278076529502869, "avg_line_length": 32.578311920166016, "blob_id": "da320d1565ce1cc59c79c3fb82c4898da74a10be", "content_id": "0235b452c4a87bf91a9b3064893d2b9b564dc9b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2787, "license_type": "no_license", "max_line_length": 109, "num_lines": 83, "path": "/ros/src/ground_truth/src/static_visualizer.py", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\n @Author: Mahir Gulzar, Tambet Matiisen\n'''\n\nimport rospy\nimport numpy as np\nimport os\nimport rospkg\nfrom visualization_msgs.msg import Marker\n#from std_msgs.msg import Header\n#import tf\n\n\nclass StaticVisualizer:\n def __init__(self):\n self.static_pub = rospy.Publisher(\n 'static_markers', Marker, queue_size=10)\n\n #self.listener = tf.TransformListener()\n self.package_path = rospkg.RosPack().get_path('ground_truth')\n\n # load static markers\n self.static_markers = 
np.loadtxt(\n os.path.join(self.package_path,\n 'data/gps_marker_positions.csv'),\n delimiter=';', skiprows=1, usecols=(0, 3, 4, 5))\n\n # Run publish loop\n def run(self):\n '''\n # wait until the transforms are available\n self.listener.waitForTransform(\n 'utm', 'base_link', rospy.Time(0), rospy.Duration(1.0))\n '''\n\n r = rospy.Rate(10)\n while not rospy.is_shutdown():\n for gcp, utm_x, utm_y, altitude in self.static_markers:\n marker = Marker()\n marker.header.frame_id = 'utm'\n marker.type = marker.CUBE\n marker.action = marker.ADD\n marker.id = gcp\n marker.scale.x = 2\n marker.scale.y = 2\n marker.scale.z = 10\n marker.color.r = 1.0\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = utm_x\n marker.pose.position.y = utm_y\n marker.pose.position.z = 63.0\n '''\n # look up transform between base_frame and UTM at the moment of publishing\n try:\n matrix = self.listener.asMatrix('utm', Header(stamp=rospy.Time(0), frame_id='base_link'))\n pos = matrix.dot([0, 0, 1, 1])\n marker.pose.position.z = pos[2]\n except (tf.LookupException, tf.ConnectivityException,\n tf.ExtrapolationException) as e:\n rospy.logwarn(\"Error looking up transform: \" + str(e))\n '''\n\n self.static_pub.publish(marker)\n\n try:\n r.sleep()\n except rospy.ROSTimeMovedBackwardsException as e:\n rospy.logwarn(\"Error during sleep: \" + str(e))\n except rospy.ROSInterruptException:\n # stop silently when ROS is stopped\n break\n\n\nif __name__ == \"__main__\":\n rospy.init_node('static_visualizer', anonymous=False)\n static_visualizer = StaticVisualizer()\n static_visualizer.run()\n" }, { "alpha_fraction": 0.6105106472969055, "alphanum_fraction": 0.6280283331871033, "avg_line_length": 32.9620246887207, "blob_id": "c0250c961bd2f9da580edd52f865b95dbac7adab", "content_id": "9ffc7eec0f977f8589367c1e93a5543629c7ae03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
2683, "license_type": "no_license", "max_line_length": 165, "num_lines": 79, "path": "/ros/src/camera_info_publisher/src/camera_info_publisher.py", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Courtesy: https://gist.githubusercontent.com/rossbar/ebb282c3b73c41c1404123de6cea4771/raw/f34e0a576ddb1a6afdcd4c0d5f44af877c5e6231/yaml_to_camera_info_publisher.py\n\n'''\n @Modifed by: Mahir Gulzar, Tambet Matiisen\n'''\n\n# Publishes camera_info from provided yaml file\n\nimport rospy\nimport yaml\nimport rospkg\nfrom sensor_msgs.msg import CameraInfo\nimport os\n\n\nclass CameraInfoPublisher:\n def __init__(self):\n package_path = rospkg.RosPack().get_path('camera_info_publisher')\n yaml_fname = rospy.get_param('~yaml_file', 'data/FlirA65.yaml')\n self.yaml_path = os.path.join(package_path, yaml_fname)\n rospy.loginfo(package_path + ':' + self.yaml_path)\n\n self.publisher = rospy.Publisher(\n 'camera_info', CameraInfo, queue_size=10)\n self.rate = rospy.Rate(10)\n\n def get_camera_info(self, yaml_fname):\n \"\"\"\n Parse a yaml file containing camera calibration data\n Parameters\n ----------\n yaml_fname : str\n Path to yaml file containing camera calibration data\n\n Returns\n -------\n camera_info_msg : sensor_msgs.msg.CameraInfo\n A sensor_msgs.msg.CameraInfo message containing the camera calibration\n data\n \"\"\"\n # Load data from file\n with open(yaml_fname, \"r\") as file_handle:\n calib_data = yaml.load(file_handle)\n # Parse\n camera_info_msg = CameraInfo()\n camera_info_msg.width = calib_data[\"image_width\"]\n camera_info_msg.height = calib_data[\"image_height\"]\n camera_info_msg.K = calib_data[\"intrinsics\"][\"data\"]\n camera_info_msg.D = calib_data[\"distortion_coefficients\"][\"data\"]\n camera_info_msg.R = calib_data[\"rectification_matrix\"][\"data\"]\n camera_info_msg.P = calib_data[\"projection_matrix\"][\"data\"]\n camera_info_msg.distortion_model = calib_data[\"distortion_model\"]\n 
camera_info_msg.header.frame_id = calib_data[\"frame_id\"]\n\n return camera_info_msg\n\n # Run publish loop\n def run(self):\n msg = self.get_camera_info(self.yaml_path)\n while not rospy.is_shutdown():\n msg.header.stamp = rospy.get_rostime()\n self.publisher.publish(msg)\n try:\n self.rate.sleep()\n except rospy.ROSTimeMovedBackwardsException as e:\n rospy.logwarn(\"Error during sleep: \" + str(e))\n except rospy.ROSInterruptException:\n break\n\n\nif __name__ == \"__main__\":\n\n # Initialize publisher node\n rospy.init_node(\"camera_info_publisher\", anonymous=False)\n\n camera_info_publisher = CameraInfoPublisher()\n camera_info_publisher.run()\n" }, { "alpha_fraction": 0.5392183065414429, "alphanum_fraction": 0.5533102750778198, "avg_line_length": 34.48113250732422, "blob_id": "8c7251b372921edaa6049a64eb19fb0bb99972d4", "content_id": "7c5dadd280432f81bbbcd284e1727034829c80d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3761, "license_type": "no_license", "max_line_length": 105, "num_lines": 106, "path": "/ros/src/ground_truth/src/dynamic_visualizer.py", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\n @Author: Mahir Gulzar, Tambet Matiisen\n'''\n\nimport rospy\nimport numpy as np\nimport os\nimport rospkg\nfrom visualization_msgs.msg import Marker\n#from std_msgs.msg import Header\n#import tf\n\n\nclass DynamicVisualizer:\n def __init__(self):\n self.dynamic_pub = rospy.Publisher(\n 'dynamic_markers', Marker, queue_size=10)\n\n #self.listener = tf.TransformListener()\n self.package_path = rospkg.RosPack().get_path('ground_truth')\n\n # load dynamic ground truth\n self.dynamic_markers = np.loadtxt(\n os.path.join(self.package_path,\n 'data/dynamic_ground_truth.txt'),\n delimiter=',', skiprows=1, usecols=(0, 1, 2, 4, 3),\n dtype={'names': ('track_id', 'x', 'y', 'timestamp', 'frame'),\n 'formats': (np.uint8, np.uint32, 
np.uint32, np.float64, np.uint32)})\n\n # sort dynamic ground truth by timestamp, inplace\n self.dynamic_markers.sort(order='timestamp')\n\n # load pixel to utm transform\n utm2pixels = np.loadtxt(os.path.join(\n self.package_path, 'data/utm2PixelsTransformMatrix.csv'), delimiter=',')\n self.pixels2utm = np.linalg.inv(utm2pixels)\n\n # Run publish loop\n def run(self):\n '''\n # wait until the transforms are available\n self.listener.waitForTransform(\n 'utm', 'base_link', rospy.Time(0), rospy.Duration(1.0))\n '''\n for track_id, x, y, timestamp, frame in self.dynamic_markers:\n\n # skip drone\n if track_id == 4:\n continue\n\n # convert current position in pixels to utm\n pos = np.array([y, x, 1])\n pos = np.dot(self.pixels2utm, pos)\n\n # create marker\n marker = Marker()\n marker.header.frame_id = 'utm'\n marker.header.stamp = rospy.Time.from_sec(timestamp)\n marker.type = marker.CYLINDER\n marker.action = marker.ADD\n marker.id = track_id\n marker.scale.x = 1\n marker.scale.y = 1\n marker.scale.z = 1.5\n marker.color.r = 1.0\n marker.color.g = 0.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = pos[0]\n marker.pose.position.y = pos[1]\n marker.pose.position.z = 63.0\n '''\n # look up transform between base_frame and UTM at the moment of publishing\n try:\n matrix = self.listener.asMatrix('utm', Header(stamp=rospy.Time(0), frame_id='base_link'))\n pos = matrix.dot([0, 0, 1, 1])\n marker.pose.position.z = pos[2]\n except (tf.LookupException, tf.ConnectivityException,\n tf.ExtrapolationException) as e:\n rospy.logwarn(\"Error looking up transform: \" + str(e))\n '''\n\n # check if current stamp is ahead of ros current_time\n current_time = rospy.get_time()\n if timestamp > current_time:\n # sleep until it is the right moment to publish\n try:\n rospy.sleep(timestamp - current_time)\n except rospy.ROSTimeMovedBackwardsException as e:\n rospy.logwarn(\"Error during sleep: \" + str(e))\n except 
rospy.ROSInterruptException:\n # stop silently when ROS is stopped\n break\n\n # publish the marker\n self.dynamic_pub.publish(marker)\n\n\nif __name__ == \"__main__\":\n rospy.init_node('dynamic_visualizer', anonymous=False)\n dynamic_visualizer = DynamicVisualizer()\n dynamic_visualizer.run()\n" }, { "alpha_fraction": 0.7204301357269287, "alphanum_fraction": 0.7231183052062988, "avg_line_length": 24.363636016845703, "blob_id": "feac8ee749f8ebf35ab32346376163ef1b57ef11", "content_id": "ddc007d2d5626605c3c4050c05136960b3fd566a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 96, "num_lines": 44, "path": "/ros/src/mirror_image/src/mirror_image.py", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom cv_bridge import CvBridge\n\nfrom sensor_msgs.msg import Image\n\nrospy.init_node('mirror_image', anonymous=False)\nnodeName = rospy.get_name()\n\n# Name of input topics from launch-file\ntopic_image_in = rospy.get_param(nodeName + '/topicImageIn', nodeName + '/UnknownInputTopic')\n#print(\"topic_image_in\",topic_image_in)\n\n# Name of output topics from launch-file\ntopic_image_out = rospy.get_param(nodeName + '/topicImageOut', nodeName + '/UnknownOutputTopic')\n#print(\"topic_image_out\",topic_image_out)\n\n# Publishers\npub_image = rospy.Publisher(topic_image_out, Image, queue_size=0)\n\nbridge = CvBridge()\n\n\ndef callback_bb(image):\n cv_image = bridge.imgmsg_to_cv2(image, desired_encoding=\"passthrough\")\n\n image_message = bridge.cv2_to_imgmsg(np.fliplr(np.flipud(cv_image)), encoding=\"passthrough\")\n image_message.header.frame_id = 'cam_thermal_frame'\n pub_image.publish(image_message)\n\n\n# Get subscripers\nrospy.Subscriber(topic_image_in, Image, callback_bb, queue_size=None)\n\n\n# main\ndef main():\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n" }, { 
"alpha_fraction": 0.7035821676254272, "alphanum_fraction": 0.707410454750061, "avg_line_length": 47.1184196472168, "blob_id": "2ed646e1b280566feed2a238899a58870f0f0d4b", "content_id": "7507154752045154ff2ff6aff3fae06ad267b78e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3657, "license_type": "no_license", "max_line_length": 131, "num_lines": 76, "path": "/ros/src/gps/htf_rtk_gps/src/rtk_gps_node.cpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "/****************************************************************************\n# Copyright (c) 2011-2013, author Dennis Tryk\n# [email protected]\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name FroboMind nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************/\n\n\n#include \"rtk_gps.hpp\"\n\nint main(int argc, char **argv){\n\n ros::init(argc, argv, \"htf_delphi_esr\");\n ros::NodeHandle nh(\"~\");\n\n RtkGps rtk_gps;\n\n string subscribe_can_topic_id;\n string subscribe_serial_topic_id;\n string publish_topic_id_Course_Speed;\n string publish_topic_id_Time;\n string publish_topic_id_Position;\n string publish_topic_id_GNSS_position_data;\n string publish_topic_id_SerialSentence;\n\n rtk_gps.updater.setHardwareID(\"RTK GPS\");\n rtk_gps.updater.add(\"RTK GPS\", &rtk_gps, &RtkGps::diagnostics);\n\n // Get setting from parameterserver\n nh.param<std::string> (\"rtk_pub_course_speed\", publish_topic_id_Course_Speed, \"/Trimble_rtk_gps/course_speed\");\n nh.param<std::string> (\"rtk_pub_GNSS_position_data\", publish_topic_id_GNSS_position_data, \"/Trimble_rtk_gps/GNSS_position_data\");\n nh.param<std::string> (\"rtk_pub_SerialSentence\", publish_topic_id_SerialSentence, \"/Trimble_rtk_gps/SerialSentence\");\n nh.param<std::string> (\"can_rx_topic\", subscribe_can_topic_id, \"/fmLib/can_rx\");\t\t // Substribe to can\n nh.param<std::string> (\"serial_rx_topic\", subscribe_serial_topic_id, \"/fmLib/serial_rx\"); // Subscribe to serial\n nh.param<std::string> (\"frame_id\", rtk_gps.frame_id_gps, \"RTK_GPS\");\n \n // Publish rtk_gps\n rtk_gps.rtk_gps_pub_course_speed = nh.advertise<htf_safe_msgs::Course_Speed>(publish_topic_id_Course_Speed, 
1);\n rtk_gps.rtk_gps_pub_GNSSPositionData = nh.advertise<htf_safe_msgs::GNSSPositionData>(publish_topic_id_GNSS_position_data, 1);\n rtk_gps.rtk_gps_pub_serialSentence = nh.advertise<nmea_msgs::Sentence>(publish_topic_id_SerialSentence, 1);\n\n // Subscibe to can_rx\n ros::Subscriber sub_can = nh.subscribe(subscribe_can_topic_id, 1, &RtkGps::getGPSData_CAN, &rtk_gps);\n // Subscibe to serial_rx\n ros::Subscriber sub_serial = nh.subscribe(subscribe_serial_topic_id, 1, &RtkGps::getGPSData_Serial, &rtk_gps);\n\n ros::spin();\n //while(ros::ok()){\n\n //ros::spinOnce();\n //rtk_gps.updater.update();\n //}\n \n return 0;\n}\n" }, { "alpha_fraction": 0.7990012764930725, "alphanum_fraction": 0.8014981150627136, "avg_line_length": 71.81818389892578, "blob_id": "bf1036b8598abbd0b5dee3e5dfaa71c72fdb4399", "content_id": "edb6a7f4a48db22e807c98a66fadbd4078c83f1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 801, "license_type": "no_license", "max_line_length": 186, "num_lines": 11, "path": "/ground_truth/static/readme.md", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "# Usage\nGround truth data for static obstacles is contained in *static_labels.png*.\n\nThe conversion from RGB-values to class labels is available in *labels.csv*.\n\nA number of GPS markers were placed on the field and measured with exact GPS positions.\nThere are available in *gps_marker_positions.csv* along with the corresponding pixel coordinates (for looking up in *static_labels.png*).\nThe correspondences have been used to generate a transformation matrix that converts UTM coordinates to pixel coordinates (x and y). Both have to be specified in homogeneous coordinates.\nThe transformation matrix is available in *utm2PixelsTransformMatrix.csv*. 
A simple usage example is shown in *demo.m*.\n\nThe script *RGB2Labels.m* converts from the RGB-colored labels in *static_labels.png* to label indices.\n" }, { "alpha_fraction": 0.7231416702270508, "alphanum_fraction": 0.731276273727417, "avg_line_length": 34.28712844848633, "blob_id": "2c4fb7082454e349ed754f0abc4ec950aafbc401", "content_id": "6f8713a13ad18c01f607d0ec76f5d32a08f4a8cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3565, "license_type": "no_license", "max_line_length": 81, "num_lines": 101, "path": "/ros/src/gps/htf_rtk_gps/src/rtk_gps.hpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "/****************************************************************************\n# Copyright (c) 2011-2013, author Dennis Tryk\n# [email protected]\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name FroboMind nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************/\n\n#ifndef RTK_GPS_H\n#define RTK_GPS_H\n\n#include <stdio.h>\n#include <stdlib.h>\n\n#include <iostream>\n\n\n//#include <boost/tokenizer.hpp>\n//#include <boost/lexical_cast.hpp>\n\n#include <string.h>\n\n#include <htf_safe_msgs/can.h>\n#include <htf_safe_msgs/serial.h>\n#include <nmea_msgs/Sentence.h>\n#include \"htf_safe_msgs/Course_Speed.h\"\n#include \"htf_safe_msgs/GNSSPositionData.h\"\n\n\n#include \"ros/ros.h\"\n#include <diagnostic_updater/diagnostic_updater.h>\n#include <diagnostic_updater/publisher.h>\n\n//typedef boost::tokenizer<boost::char_separator<char> > tokenizer;\n\nusing namespace std;\n\nclass RtkGps\n{\npublic:\n\t/*Public Methods*/\n\tRtkGps();\n\t~RtkGps();\n\tvoid diagnostics(diagnostic_updater::DiagnosticStatusWrapper &stat);\n\tvoid getGPSData_CAN(const htf_safe_msgs::can::ConstPtr& rx_msg);\n\tvoid getGPSData_Serial(const htf_safe_msgs::serial::ConstPtr& rx_msg);\n\t//void processGPS_Serial_GPVTG(tokenizer& tokens,std::string raw);\n\t//void processGPS_Serial_GPGGA(tokenizer& tokens,std::string raw);\n\n\t/*Public Var*/\n\tstring frame_id_gps;\n\tdiagnostic_updater::Updater updater;\n\tros::Publisher rtk_gps_pub_course_speed;\n\tros::Publisher rtk_gps_pub_GNSSPositionData;\n\tros::Publisher rtk_gps_pub_serialSentence;\nprivate:\n\t/*Private Methods*/\n\tvoid parseCourseSpeedData(const htf_safe_msgs::can::ConstPtr& rx_msg);\n\tvoid 
StoreGNSSPositionData(const htf_safe_msgs::can::ConstPtr& rx_msg);\n\tvoid parseGNSSPositonData(const htf_safe_msgs::can::ConstPtr& rx_msg);\n\n\n\t/*Private Var*/\n\thtf_safe_msgs::Course_Speed CourseSpeedMsg;\n\thtf_safe_msgs::GNSSPositionData GNSSPositionDataMsg;\n\tnmea_msgs::Sentence serialSentence;\n\tdiagnostic_updater::HeaderlessTopicDiagnostic pub_freq;\n\tsocklen_t len;\n\tuint8_t GNSSFrame0[8];\n\tuint8_t GNSSFrame1[8];\n\tuint8_t GNSSFrame2[8];\n\tuint8_t GNSSFrame3[8];\n\tuint8_t GNSSFrame4[8];\n\tuint8_t GNSSFrame5[8];\n\tuint8_t GNSSFrame6[8];\n\t\n\tdouble min_freq;\n\tdouble max_freq;\n};\n\n#endif\t\n" }, { "alpha_fraction": 0.626092791557312, "alphanum_fraction": 0.637525200843811, "avg_line_length": 25.553571701049805, "blob_id": "f44f1ea4af61e3325b8d68ee88dc27ed54390abc", "content_id": "0e60c677bb5a113d568b8364c2cad7bde69ce3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 107, "num_lines": 56, "path": "/ros/src/image_publisher/src/main.cpp", "repo_name": "mraltuntass/FieldSAFE", "src_encoding": "UTF-8", "text": "#include <ros/ros.h>\n#include <sensor_msgs/Image.h>\n#include <cv_bridge/cv_bridge.h>\n\n// OpenCV\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n\nint main(int argc, char **argv) {\n\n std::string topicArg;\n float rateArg;\n std::string mapImagePathArg;\n std::string encoding;\n\n ros::init(argc, argv, \"map_publisher\");\n ros::NodeHandle n(\"~\");\n\n ROS_INFO(\"Started map_publisher\");\n\n n.param<std::string>(\"image_path\", mapImagePathArg, \"/foo/bar.png\");\n n.param<std::string>(\"topic\", topicArg, \"/map\");\n n.param<float>(\"rate\", rateArg, 1);\n ros::Rate rate(rateArg); // The rate with which we send\n\n // LOAD\n cv::Mat image(cv::imread( mapImagePathArg, 1 ));\n if( !image.data ) {\n throw std::runtime_error(\"Loading image failed\");\n 
}\n\n // Sanity checks\n if (image.depth() != CV_8U) {\n ROS_ERROR_STREAM(\"Can only handle images 8 bit depth = \");\n return 0;\n }\n if (image.channels() == 1 /*It's an gray image*/) {\n encoding = \"mono8\";\n } else if (image.channels() == 3) {\n encoding = \"bgr8\";\n } else {\n ROS_ERROR_STREAM(\"Can only handle images with 1 or 3 channels: num channels = \" << image.channels());\n return 0;\n }\n\n sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), encoding, image).toImageMsg();\n\n ros::Publisher publisher = n.advertise<sensor_msgs::Image>(topicArg, 1);\n\n while (n.ok()) {\n publisher.publish(msg);\n rate.sleep();\n }\n return 0;\n}\n" } ]
17
qxc0014/Cluster
https://github.com/qxc0014/Cluster
af843bcca08e9ab49b32db5b8aadcd938fc38570
bf2834c93b79e8526867718610b8b59516a3eafb
8e820ef9808127c9bf4af8df3a4bc7cb93a39b12
refs/heads/master
2022-06-13T01:44:18.569687
2020-05-06T05:53:29
2020-05-06T05:53:29
261,664,718
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4990520477294922, "alphanum_fraction": 0.5253844261169434, "avg_line_length": 34.69173049926758, "blob_id": "1b37113aa28401703687a9d739fd5633275cad0c", "content_id": "0662fc72d763ec73f178c24e783ce60a9e00abbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4855, "license_type": "no_license", "max_line_length": 117, "num_lines": 133, "path": "/GMM/gmm.cpp", "repo_name": "qxc0014/Cluster", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<fstream>\n#include<vector>\n#include<opencv2/core.hpp>\n#include<opencv2/opencv.hpp>\n#include<opencv2/highgui/highgui.hpp>\n#include<Eigen/Core>\n#include<Eigen/Dense>\n#include<Eigen/Eigenvalues>\nusing namespace std;\nusing namespace cv;\nconst float min_extent = 1;\nconst float com_float = 2e6;\nconst int img_size=400;\nconst int k = 3;//่š็ฑปๆˆ2\nint point_num =1500;\nstring points_list = \"/home/esoman/c++code/c++/GMM/blobs.txt\";\nclass Gauss{\n public:\n Point2d center;//ๅ‡ๅ€ผ\n Eigen::Matrix2d covMat;//ๆ–นๅทฎ\n double pai_k;//ๆจกๅž‹็š„ๆƒ้‡vector\n Eigen::MatrixXd gauss_result;\n Eigen::MatrixXd p_z_x;\n double Nk;\n long double cal_p(Point2d point){\n double gailv=0;\n Eigen::Vector2d error(point.x-this->center.x,point.y-this->center.y);\n //double w = -0.5*error.transpose()*(covMat.inverse())*error;\n // cout <<\"w=\"<< w <<endl;\n gailv = (1./sqrt(2 * M_PI * covMat.determinant()))*exp(-0.5*error.transpose()*(covMat.inverse())*error);\n return gailv;\n }\n};\nint main(int argc, char const *argv[])\n{\n //่ฏปๅ–ๆ•ฐๆฎ\n ifstream fin;\n fin.open(points_list,ios::in);\n string buff;\n vector<Point2d> datasets;\n while (getline(fin, buff))\n {\n Point2d points;\n char *s_input = (char *)buff.c_str();\n const char * split = \",\";\n char *p = strtok(s_input, split);\n double a,b;\n\t\ta = atof(p);\n points.x = a*10+img_size/2;//้œ€่ฆๆ นๆฎๅฏนๅบ”็š„็‚นไบ‘่ฐƒๆ•ดๅคงๅฐ\n\t\tp = strtok(NULL, split);\n b = atof(p);\n points.y = 
b*10+img_size/2;\n datasets.push_back(points);\n }\n //็ป˜ๅˆถๆ‰€ๆœ‰ๆ•ฐๆฎ็‚น\n Mat image(img_size, img_size, CV_8UC3, cv::Scalar(255, 255, 255));\n for(int i =0;i<datasets.size();i++){\n cv::circle(image,datasets[i],1,cv::Scalar(0, 0, 0));\n }\n //gmmๅˆๅง‹ๅŒ–\n //้šๆœบ้€‰ๅ–ไธคไธช็‚นไฝœไธบๅˆๅง‹miu\n int center_index[k];\n srand(time(0));\n center_index[0] = rand()%datasets.size();\n while((center_index[1] = rand()%datasets.size())==center_index[0]){\n }\n vector<Gauss> gauss_dis(k); \n for(int i = 0;i<k;i++){\n gauss_dis[i].center = datasets[center_index[i]];\n gauss_dis[i].covMat = 8*Eigen::Matrix2d::Identity(2,2);\n gauss_dis[i].pai_k = 1. / (double)k;\n }\n // cout <<p_x<< endl;\n\n while(1){\n //E-step\n \n Eigen::MatrixX3d point_p_z_x(datasets.size(),k);\n for(int j=0;j<datasets.size();j++){\n long double p_x=0;\n for(int i=0;i<k;i++){\n p_x += gauss_dis[i].pai_k*gauss_dis[i].cal_p(datasets[j]);\n } \n //cout <<\"px=\"<< p_x<< endl;\n for(int i=0;i<k;i++){\n double p_z_x=0;\n if(p_x != 0){\n p_z_x = gauss_dis[i].pai_k*gauss_dis[i].cal_p(datasets[j]) / (long double)p_x;\n point_p_z_x(j,i) = p_z_x;\n }\n\n }\n circle(image,datasets[j],2,cv::Scalar(255*point_p_z_x(j,0), 255*point_p_z_x(j,1), 255*point_p_z_x(j,2)));\n }\n //M-step\n //่ฎก็ฎ—Nk\n //cout <<\"pzx:\"<< point_p_z_x<<endl;\n for(int i =0;i<k;i++){\n // cout << gauss_dis[i].covMat.determinant() << endl;\n // circle(image, gauss_dis[i].center,gauss_dis[i].covMat.determinant(),cv::Scalar(0, 0, 255));\n gauss_dis[i].Nk = point_p_z_x.col(i).sum();\n cout <<\"Nk\" << gauss_dis[i].Nk << endl;\n //circle(image, gauss_dis[i].center,gauss_dis[i].covMat.determinant(),cv::Scalar(0, 0, 255));\n // gauss_dis[i].covMat.eigenvalues() ;\n //Size size(gauss_dis[i].covMat.eigenvalues,gauss_dis[i].covMat.eigenvalues[1]);\n //cvEllipse(&image,gauss_dis[i].center,size,2*M_PI,M_PI,-M_PI,cv::Scalar(0, 0, 255));\n }\n for(int i = 0;i<k;i++){\n Point2d sum_point(0,0);\n Eigen::Matrix2d sum_covMat(Eigen::Matrix2d::Zero());\n 
for(int j =0;j<datasets.size();j++){ \n sum_point += point_p_z_x(j,i)*datasets[j];\n }\n gauss_dis[i].center = sum_point/gauss_dis[i].Nk;\n for(int j =0;j<datasets.size();j++){ \n Eigen::Vector2d error_cov(datasets[j].x-gauss_dis[i].center.x,datasets[j].y-gauss_dis[i].center.y);\n sum_covMat += point_p_z_x(j,i)*error_cov*error_cov.transpose();\n }\n gauss_dis[i].covMat = sum_covMat/(double)gauss_dis[i].Nk;\n gauss_dis[i].pai_k = gauss_dis[i].Nk/datasets.size();\n }\n imshow(\"gmm\",image);\n waitKey();\n }\n //ๆ˜พ็คบ\n /* for(int i =0;i<1500;i++){\n if(gauss_dis[0].gauss_result(i,0)>0.01){\n circle(image,datasets[i],2,cv::Scalar(0, 0, 255));\n }\n }*/\n return 0;\n}\n" }, { "alpha_fraction": 0.5039578080177307, "alphanum_fraction": 0.5414248108863831, "avg_line_length": 36.156864166259766, "blob_id": "9f021b58ba6fa942ebf4ef09f7ed43310fda5bc7", "content_id": "a522c20188409f558927bf4abc0b6f4129a55c0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3846, "license_type": "no_license", "max_line_length": 86, "num_lines": 102, "path": "/SpectralClustering.py", "repo_name": "qxc0014/Cluster", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\")\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom itertools import cycle, islice\nimport numpy as np\n\nfrom sklearn import datasets\n\ndef caldistance(x1, x2, sqrt_flag=False):\n res = np.sum((x1-x2)**2)\n if sqrt_flag:\n res = np.sqrt(res)\n return res\n\nnp.random.seed(1)\n#ๅˆ›ๅปบๆ•ฐๆฎ้›†\nprint('start generate datasets ...')\nn_samples = 1500\nnoisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,\n noise=.05)\nnoisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)\nblobs = datasets.make_blobs(n_samples=n_samples, random_state=8)\nno_structure = np.random.rand(n_samples, 2), None\n\n# Anisotropicly distributed data\nrandom_state = 170\nX, y = 
datasets.make_blobs(n_samples=n_samples, random_state=random_state)\ntransformation = [[0.6, -0.6], [-0.4, 0.8]]\nX_aniso = np.dot(X, transformation)\naniso = (X_aniso, y)\n\n# blobs with varied variances\nvaried = datasets.make_blobs(n_samples=n_samples,\n cluster_std=[1.0, 2.5, 0.5],\n random_state=random_state)\nprint('datasets generated over')\ndefault_base = {'quantile': .3,\n 'eps': .3,\n 'damping': .9,\n 'preference': -200,\n 'n_neighbors': 10,\n 'n_clusters': 3,\n 'min_samples': 20,\n 'xi': 0.05,\n 'min_cluster_size': 0.1}\ndatasets = [\n (noisy_circles, {'damping': .77, 'preference': -240,\n 'quantile': .2, 'n_clusters': 2,\n 'min_samples': 20, 'xi': 0.25}),\n (noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),\n (varied, {'eps': .18, 'n_neighbors': 2,\n 'min_samples': 5, 'xi': 0.035, 'min_cluster_size': .2}),\n (aniso, {'eps': .15, 'n_neighbors': 2,\n 'min_samples': 20, 'xi': 0.1, 'min_cluster_size': .2}),\n (blobs, {}),\n (no_structure, {})]\n#ๆž„้€ ่ท็ฆป็Ÿฉ้˜ต\nfor i_dataset, (dataset, algo_params) in enumerate(datasets):\n params = default_base.copy()\n params.update(algo_params)\n dataset = np.array(dataset[0])\n dis = np.zeros((len(dataset), len(dataset)))\n for i in range(len(dataset)):\n for j in range(i+1, len(dataset)):\n dis[i][j] = 1.0 * caldistance(dataset[i], dataset[j])\n dis[j][i] = dis[i][j]\n #ๆž„้€ ็›ธไผผๅบฆ็Ÿฉ้˜ต\n xsize = len(dis)\n sim = np.zeros((xsize,xsize))\n for i in range(xsize):\n index = zip(dis[i], range(xsize))\n index = sorted(index, key=lambda x:x[0])\n knn_index = [index[w][1] for w in range(13)] \n for j in knn_index:\n if j!=i:\n sim[i][j] = 10./dis[i][j]\n sim[j][i] = sim[i][j] \n #ๆž„้€ ๆ‹‰ๆ™ฎๆ‹‰ๆ–ฏ็Ÿฉ้˜ต\n DMatrix = np.sum(sim, axis=1)\n LMatrix = np.diag(DMatrix) - sim\n #sqrtDegreeMatrix = np.diag(1.0 / (DMatrix ** (0.5)))\n #LMatrix = np.dot(np.dot(sqrtDegreeMatrix, LMatrix), sqrtDegreeMatrix)\n #svdๅˆ†่งฃ\n U, V = np.linalg.eig(LMatrix)\n U = zip(U, range(len(U)))\n U = sorted(U, key=lambda 
U:U[0])\n H = np.vstack([V[:,i] for (v, i) in U[:n_samples]]).T\n H = H[:,0:params['n_clusters']]\n print(H)\n #kmeans\n kmeans = KMeans(n_clusters=params['n_clusters'],init='k-means++').fit(H) \n y_pred = kmeans.labels_.astype(np.int)\n colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',\n '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00']),\n int(max(y_pred) + 1))))\n plt.subplot(111)\n plt.scatter(dataset[:,0], dataset[:,1], s=10, color=colors[y_pred])\n plt.title(\"Spectral Clustering\")\n plt.show()\n" }, { "alpha_fraction": 0.5635663270950317, "alphanum_fraction": 0.582186758518219, "avg_line_length": 37.52296829223633, "blob_id": "fcc42ebf8fd084dab0be07dd49aacf553b525aa4", "content_id": "b25f611bd640732cb64ffe075fcca6cad90300dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11854, "license_type": "no_license", "max_line_length": 147, "num_lines": 283, "path": "/Spectral_clustreing/Spectral_clustreing.cpp", "repo_name": "qxc0014/Cluster", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<fstream>\n#include<vector>\n#include<opencv2/core.hpp>\n#include<opencv2/opencv.hpp>\n#include<opencv2/highgui/highgui.hpp>\n#include<Eigen/Core>\n#include<Eigen/Dense>\nusing namespace std;\nusing namespace cv;\nconst float min_extent = 1;\nconst float com_float = 2e6;\nconst int k = 2;//่š็ฑปๆˆ2\nstring points_list = \"/home/esoman/c++code/c++/Spectral_clustreing/circle.txt\";\nclass QuadNode{\n public:\n QuadNode(){}\n QuadNode(QuadNode* child_root,Point2d center,float extent,vector<int> points_index,bool is_leaf)\n :extent_(extent),points_index_(points_index),is_leaf_(is_leaf),center_(center){\n // for(int i=0;i<8;i++)\n // child_root_[i] = new Octant();\n }\n int depth_=0;//่Š‚็‚น็š„ๆทฑๅบฆ\n QuadNode* child_root_[8]={nullptr,nullptr,nullptr,nullptr};//ๅญ˜ๅ››ไธชๅญ็ซ‹ๆ–นไฝ“็š„ๆŒ‡้’ˆ\n Point2d center_;//ๅฝ“ๅ‰็ซ‹ๆ–นไฝ“็š„ไธญๅฟƒๅๆ ‡\n float 
extent_;//ๅฝ“ๅ‰ๆญฃๆ–นไฝ“็š„ๅŠ่พน้•ฟ\n vector<int> points_index_;//ๅฝ“ๅ‰็ซ‹ๆ–นไฝ“็š„ๅŒ…ๅซ็‚น็š„Index\n bool is_leaf_;//ๅฝ“ๅ‰ๅๆ ‡ๆ˜ฏๅฆไธบๅถๅญ\n\n};\nclass distindex{\n public:\n distindex(float dist_,int index_):dist(dist_),index(index_){}\n float dist;\n int index;\n};\nclass result{\n public:\n result(float worst_dis_):worst_dis(worst_dis_){}//็”จไบŽๆœ็ดขไธ€ไธช่ฟ‘้‚ป็‚น\n result(float worst_dis_,int k):worst_dis(worst_dis_),worst_dis_cap(vector<distindex>(k,distindex(worst_dis_,-1))),size(k){ \n }\n float worst_dis=0;\n int index;\n int num=0;\n int size;\n vector<distindex> worst_dis_cap;\n void add_point(float bias,int node_index);\n};\nvoid result::add_point(float bias,int node_index){\n if(num != this->size) num++;//ๅทฒๆ’ๅ…ฅๅ€ผ็š„ไธชๆ•ฐ\n if(bias >= worst_dis_cap[this->size-1].dist) return;//ๅคงไบŽๆœ€ๅคงๅ€ผ็›ดๆŽฅ่ทณๅ‡บ\n int i = num-1;//ๅทฒ็ปๆ’ๅ…ฅๆœ€ๅคงๅ€ผ็š„index\n while(i>0){\n if(bias < worst_dis_cap[i-1].dist){\n this->worst_dis_cap[i] = worst_dis_cap[i-1];\n i--;\n }else{\n break;\n }\n }\n worst_dis_cap[i].dist = bias;\n worst_dis_cap[i].index = node_index;\n this->worst_dis = worst_dis_cap[this->size-1].dist;\n}\nQuadNode* build_Quadtree(QuadNode* root,vector<Point2d>* db,Point2d center,float extent,vector<int> points_index){\n if(points_index.size() == 0) {\n return nullptr;\n }\n if(root == nullptr){\n // cout << \"่Š‚็‚นๆทฑๅบฆ๏ผš\" << depth << \"่Š‚็‚นๅฎฝๅบฆ\" << width << endl;\n root = new QuadNode(nullptr,center,extent,points_index,true);\n }\n if(extent < min_extent && points_index.size()<=1){\n root->is_leaf_ = true;//ๅถๅญ่Š‚็‚น\n }else{\n root->is_leaf_ = false;//ไธๆ˜ฏๅถๅญ\n vector<vector<int>> child_point_index(4);\n for(auto point_idx:points_index){\n int Coordinate = 0;\n if((*db)[point_idx].x > center.x){\n Coordinate = Coordinate | 1;\n }\n if((*db)[point_idx].y > center.y){\n Coordinate = Coordinate | 2;\n }\n child_point_index[Coordinate].push_back(point_idx);\n }\n float factor[2] = {-0.5,0.5};\n vector<Point2d> 
child_center(4);\n float child_extent=0;\n for(int i = 0;i < 4;i++){\n child_center[i].x = center.x + factor[(i&1)>0]*extent;\n child_center[i].y = center.y + factor[(i&2)>0]*extent;\n child_extent = 0.5 *extent;\n //cout << child_extent << endl;\n root->child_root_[i] = build_Quadtree(root->child_root_[i],db,child_center[i],child_extent,child_point_index[i]);\n } \n }\n return root;\n} \n//ๅˆคๆ–ญ็ƒไธŽ็ซ‹ๆ–นไฝ“็š„ๆ–นไฝ\nbool overlap(QuadNode* root,Point2d Point,float worst_dis){\n //ๅˆ†ไธ‰็งๆƒ…ๅ†ต:\n //็ฌฌไธ€็ง:็ƒไธŽ็ซ‹ๆ–นไฝ“ๆฒกๆœ‰ๆŽฅ่งฆ,ๅช่ฆๆŠ•ๅฝฑ็š„ๆŸไธชๆ–นๅ‘ๆปก่ถณๅฐฑๅฏไปฅ\n float xyz[2];\n xyz[0] = fabs(root->center_.x - Point.x);\n xyz[1] = fabs(root->center_.y - Point.y);\n float max_dis = (root->extent_+ worst_dis);\n if( xyz[0] > max_dis || xyz[1] > max_dis ) return false;\n //็ฌฌไบŒ็ง:็ƒไธŽ็ซ‹ๆ–นไฝ“็›ธไบค๏ผˆ้€š่ฟ‡ๆŠ•ๅฝฑๅˆคๆ–ญ๏ผ‰่‡ณๅฐ‘ๆœ‰ไธคไธชๆŠ•ๅฝฑ้ขๅŒ…ๅซไบ†ๅœ†ๅฟƒๅฐฑๅฏไปฅ่ฎคไธบๆ˜ฏ็›ธไบค\n if(((xyz[0]<root->extent_)+(xyz[1]<root->extent_))>=1) return true;\n //็ฌฌไธ‰็ง:่กฅๅ……็ฌฌไบŒ็ง๏ผŒๅœจ่พน็•Œๅค„็›ธไบคไธๆปก่ถณ็ฌฌไบŒ็ง\n float x = (xyz[0]-root->extent_)>0?(xyz[0]-root->extent_):0;\n float y = (xyz[1]-root->extent_)>0?(xyz[1]-root->extent_):0;\n if(x*x+y*y<worst_dis*worst_dis) return true;\n}\n//ๅˆคๆ–ญ็ƒๆ˜ฏๅฆๅœจ็ซ‹ๆ–นไฝ“ๅ†…\nbool inside(QuadNode* root,Point2d Point,float worst_dis){\n float xyz[2];\n xyz[0] = fabs(root->center_.x - Point.x);\n xyz[1] = fabs(root->center_.y - Point.y);\n float max_dis = (root->extent_ - worst_dis);\n return ((xyz[0] < max_dis) && (xyz[1] < max_dis));\n\n}\nbool Quadtree_knn_search(QuadNode* root,vector<Point2d>* db,Point2d Point,result &a){\n //ๅ…ˆๅˆคๆ–ญๅฝ“ๅ‰rootๆ˜ฏๅฆไธบ็ฉบๆŒ‡้’ˆ\n if(root == nullptr) return false;\n //ๅˆคๆ–ญๅฝ“ๅ‰็š„่Š‚็‚นๆ˜ฏๅฆไธบๅถๅญ\n if((root->is_leaf_ == true) && root->points_index_.size() == 1){\n //่ฎก็ฎ—worst_dis\n //cout << \"ๆ‰พๅˆฐๅถๅญ๏ผ\" << endl;\n Eigen::Vector2d radius(Point.x - (*db)[root->points_index_[0]].x,\n Point.y - (*db)[root->points_index_[0]].y);\n float 
dis = radius.squaredNorm();\n a.add_point(dis,root->points_index_[0]);\n //a.worst_dis = a.worst_dis < dis? a.worst_dis:dis;\n //ๅˆคๆ–ญ็Žฐๅœจ็š„็ƒๆ˜ฏๅฆๅœจ็ซ‹ๆ–นไฝ“ๅ†…๏ผŒๅฆ‚ๆžœๅœจๅฏไปฅๆๅ‰็ปˆๆญข\n bool q = inside(root,Point,a.worst_dis);\n // cout << a.worst_dis_cap[0].dist << endl;\n return q;\n }\n //ๅˆคๆ–ญ็›ฎๆ ‡็‚นๆ‰€ๅฑž่ฑก้™\n int Coordinate = 0;\n if(Point.x > root->center_.x){\n Coordinate = Coordinate | 1;\n }\n if(Point.y > root->center_.y){\n Coordinate = Coordinate | 2;\n }\n //่ฟญไปฃๅฏปๆ‰พๆ–ฐ็š„ๅญ่ฑก้™\n if(Quadtree_knn_search(root->child_root_[Coordinate],db,Point,a)) return true;\n //ๅฝ“ๅ‘็Žฐๆœ€่ฟ‘็š„ๅญ่ฑก้™้ƒฝไธ่ƒฝๅฎŒๅ…จๅŒ…่ฃนๆœ€ๅ่ท็ฆป๏ผŒ้‚ฃไนˆๅฐฑ่ฆๆ‰ซๆๅ…ถไป–็š„ๅญ่ฑก้™\n for(int i = 0;i<4;i++){\n //ๅ…ˆๆŽ’้™คๅˆšๆ‰ๅทฒ็ปๆ‰ซๆ่ฟ‡็š„่ฑก้™\n if(i == Coordinate || root->child_root_[i] == nullptr) continue;\n //ๅ†ๆŽ’้™ค็ƒไธŽ็ซ‹ๆ–นไฝ“ไธ็›ธไบค็š„ๆƒ…ๅ†ต\n //cout << i << endl;\n if(false == overlap(root->child_root_[i],Point,a.worst_dis)) continue;\n //ๆœ€ๅŽๅฏน่ฟ™ไธช่ฑก้™่ฟ›่กŒ่ฎก็ฎ—worst_dis\n if(Quadtree_knn_search(root->child_root_[i],db,Point,a)) return true;\n }\n\n //ๅ†ๆฌกๅˆคๆ–ญ็Žฐๅœจ็š„็ƒๆ˜ฏๅฆๅœจ็ซ‹ๆ–นไฝ“ๅ†…๏ผŒๅฆ‚ๆžœๅœจๅฏไปฅๆๅ‰็ปˆๆญข\n return inside(root,Point,a.worst_dis);\n}\nint main(int argc, char const *argv[])\n{\n //่ฏปๅ–ๆ•ฐๆฎ\n ifstream fin;\n fin.open(points_list,ios::in);\n string buff;\n vector<Point2d> datasets;\n while (getline(fin, buff))\n {\n Point2d points;\n char *s_input = (char *)buff.c_str();\n const char * split = \",\";\n char *p = strtok(s_input, split);\n double a,b;\n\t\ta = atof(p);\n points.x = a*50+300;\n\t\tp = strtok(NULL, split);\n b = atof(p);\n points.y = b*50+300;\n datasets.push_back(points);\n }\n //็ป˜ๅˆถๆ‰€ๆœ‰ๆ•ฐๆฎ็‚น\n Mat image(600, 600, CV_8UC3, cv::Scalar(255, 255, 255));\n for(int i =0;i<datasets.size();i++){\n cv::circle(image,datasets[i],1,cv::Scalar(255, 255, 255));\n }\n //Spectral_clustreingๅˆๅง‹ๅŒ–\n //ๅปบๆ ‘\n float 
x_min_dataset,x_max_dataset,y_min_dataset,y_max_dataset;\n x_min_dataset = com_float;\n x_max_dataset = -com_float;\n y_min_dataset = com_float;\n y_max_dataset = -com_float;\n vector<int> point_index_dataset(datasets.size());\n std::partial_sum(point_index_dataset.begin(), point_index_dataset.end(), point_index_dataset.begin(), [](const int&a, int b) {return a + 1;});\n for(auto point:datasets){\n x_min_dataset = x_min_dataset < point.x?x_min_dataset:point.x;\n x_max_dataset = x_max_dataset > point.x?x_max_dataset:point.x;\n y_min_dataset = y_min_dataset < point.y?y_min_dataset:point.y;\n y_max_dataset = y_max_dataset > point.y?y_max_dataset:point.y;\n }\n Point2d dataset_center_point((x_min_dataset+x_max_dataset)/2.,(y_min_dataset+y_max_dataset)/2.);\n float dataset_extent = (x_max_dataset-x_min_dataset)>(y_max_dataset-y_min_dataset)?(x_max_dataset-x_min_dataset):(y_max_dataset-y_min_dataset);\n dataset_extent = ceil(0.5*dataset_extent);\n QuadNode* dataset_root=nullptr;\n dataset_root = build_Quadtree(dataset_root,&datasets,dataset_center_point,dataset_extent,point_index_dataset);\n Eigen::MatrixXd dis_matrix(datasets.size(),datasets.size()); \n Eigen::MatrixXd W_matrix(datasets.size(),datasets.size()); \n Eigen::MatrixXd D_matrix(datasets.size(),datasets.size()); \n Eigen::MatrixXd L_matrix(datasets.size(),datasets.size()); \n // W_matrix = Eigen::MatrixXd::Zero();\n for(int i =0;i<datasets.size();i++){\n result b(2e6,13);\n Quadtree_knn_search(dataset_root,&datasets,datasets[i],b);//ๅฏนๆ‰€ๆœ‰็š„็‚น่ฟ›่กŒknnๆœ็ดข\n for( auto knn_result : b.worst_dis_cap){\n if(knn_result.index>i)\n W_matrix(i,knn_result.index) = exp(-knn_result.dist);\n W_matrix(knn_result.index,i) = W_matrix(i,knn_result.index);\n }\n D_matrix(i,i) = W_matrix.row(i).sum();\n }\n L_matrix = D_matrix - W_matrix;\n Eigen::JacobiSVD<Eigen::MatrixXd> svd(L_matrix,Eigen::ComputeThinU | Eigen::ComputeThinV);\n Eigen::MatrixXd U = svd.matrixU();\n Eigen::MatrixXd small_vector = 
U.block<2,1500>(0,1497);\n cout << \"small_vector:\"<< small_vector << endl;\n vector<Point2d> pointsets;\n for(int k=0;k<U.rows();k++){\n Point2d poi(small_vector(k,0),small_vector(k,1));\n pointsets.push_back(poi);\n }\n //k-meansๅˆๅง‹ๅŒ–\n //้šๆœบ้€‰ๅ–ไธคไธช็‚นไฝœไธบๅˆๅง‹ไธญๅฟƒ็‚น\n int center_index[k];\n srand(time(0));\n center_index[0] = rand()%pointsets.size();\n while((center_index[1] = rand()%pointsets.size())==center_index[0]){\n //cout << \"1\" << center_index[1] << endl;\n //cout << \"2\"<< center_index[2] << endl;\n }\n vector<Point2d> center={pointsets[center_index[0]],pointsets[center_index[1]]};\n //้ๅކๆฏไธช็‚นๆ‰พๆœ€่ฟ‘็š„ไธญๅฟƒ็‚น\n vector<vector<Point2d>> last_kmean_class(k);//ไธŠๆฌก็š„k่š็ฑปไธญ็š„็‚น้›†\n while(1){\n vector<vector<Point2d>> kmean_class(k);//ๅญ˜ๆ”พไธๅŒ็ฑป็š„็‚น็š„index\n kmean_class.clear();\n for(int i=0;i<pointsets.size();i++){\n Eigen::Vector2d vec0(pointsets[i].x-center[0].x,pointsets[i].y-center[0].y);\n Eigen::Vector2d vec1(pointsets[i].x-center[1].x,pointsets[i].y-center[1].y);\n if(vec0.squaredNorm() < vec1.squaredNorm()){\n kmean_class[0].push_back(pointsets[i]);\n cv::circle(image,datasets[i],2,cv::Scalar(0, 0, 255));//็บข่‰ฒ\n }else{\n kmean_class[1].push_back(pointsets[i]);\n cv::circle(image,datasets[i],2,cv::Scalar(255, 0, 0));//็บข่‰ฒ\n }\n //cv::circle(image,datasets[i],2,cv::Scalar(0, 0, 255));\n }\n // cout << \"class1:\" << kmean_class[0] << endl;\n // cout << \"class2:\" << kmean_class[1] << endl; \n for(int i=0;i<k;i++){\n Point2d sum(0,0);\n for(auto point_:kmean_class[i])\n sum +=point_;\n center[i] = sum / (int)kmean_class[i].size();\n }\n // if(kmean_class == last_kmean_class && center == last_center){//ไธญๅฟƒ็‚นไธๅ˜ใ€Kไธช็ฑปไธญ็‚นไธๅ˜ๅฐฑๅœๆญข\n // break;\n // }\n imshow(\"spectral_clustreing\",image);\n waitKey();\n last_kmean_class = kmean_class;\n // last_center = center;\n }\n //ๆ˜พ็คบ\n return 0;\n}\n" }, { "alpha_fraction": 0.7632135152816772, "alphanum_fraction": 0.7780126929283142, 
"avg_line_length": 35.46154022216797, "blob_id": "8dd26b84b0a25f920ac9290ba985d7b73cd94dc8", "content_id": "d8765de0493bf54e00ed7cf06e3708ef4cb23409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 473, "license_type": "no_license", "max_line_length": 66, "num_lines": 13, "path": "/K-means/CMakeLists.txt", "repo_name": "qxc0014/Cluster", "src_encoding": "UTF-8", "text": "cmake_minimum_required( VERSION 2.8 )\nproject( kmeans )\nset(CMAKE_BUILD_TYPE \"Release\")\nSET( CMAKE_CXX_STANDARD 14)\nSET( CMAKE_CXX_STANDARD_REQUIRED ON)\nfind_package(OpenCV 3 REQUIRED)\ninclude_directories(${OpenCV_INCLUDE_DIRS})\nfind_package(PCL 1.9 REQUIRED)\ninclude_directories(${PCL_INCLUDE_DIRS})\nlink_libraries(${PCL_LIBRARY_DIRS})\nadd_definitions(${PCL_DEFINITIONS})\nadd_executable(kmeans kmeans.cpp)\ntarget_link_libraries(kmeans ${OpenCV_LIBRARIES} ${PCL_LIBRARIES})" } ]
4
YsY-gc/dla_semantic_seg
https://github.com/YsY-gc/dla_semantic_seg
5864c9fca3f7b11f162b2faae38f84e839e7a081
4a39538cc651dc941e96dbe16e48cbee4e7f22a4
8b92f7b90ca6732cf95fa6acc9f16b9c39f0baa2
refs/heads/master
2023-01-20T13:18:35.457109
2020-11-23T07:59:23
2020-11-23T07:59:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6532291173934937, "alphanum_fraction": 0.6725627183914185, "avg_line_length": 40.20338821411133, "blob_id": "6c5bcc265c13f4af7e79570a5c25df8f847ab34f", "content_id": "27c44895041b091c570d3c2aa347ea6158465438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2431, "license_type": "no_license", "max_line_length": 114, "num_lines": 59, "path": "/onnx_val.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport torch\nimport os\nimport argparse\nimport numpy as np\nfrom models.dla_up import DLASeg\nimport onnxruntime as ort\nfrom utils.config import Config, PALLETE\nimport cv2\nfrom utils.data_transforms import building_detect_aug\nimport pdb\n\nparser = argparse.ArgumentParser(description='Validation script for DLA Semantic Segmentation.')\nparser.add_argument('--trained_model', default='dla34_40000_0.01.pth', type=str, help='path to the trained model')\nparser.add_argument('--model', type=str, default='dla34', help='The model structure.')\nparser.add_argument('--dataset', type=str, default='buildings', help='The dataset for validation.')\nparser.add_argument('--img_in', type=str, default='1.tif', help='The dataset for validation.')\nparser.add_argument('--use_dcn', default=False, action='store_true', help='Whether to use DCN.')\nparser.add_argument('--onnx', default=False, action='store_true', help='Get onnx model.')\n\nargs = parser.parse_args()\ncfg = Config(args=args.__dict__, mode='Detect')\ncfg.show_config()\n\nmodel = DLASeg(cfg).cuda()\nmodel.load_state_dict(torch.load('weights/' + cfg.trained_model), strict=True)\nmodel.eval()\n\nimg_np = cv2.imread(cfg.img_in).astype('float32')\nimg_np, img_origin = building_detect_aug(img_np, onnx_mode=True)\nimg_tensor = torch.tensor(img_np, device='cuda').detach()\noutput = model(img_tensor)\n\nif not os.path.exists('dla_semantic.onnx'):\n torch.onnx.export(model,\n 
img_tensor, # model input (or a tuple for multiple inputs)\n \"dla_semantic.onnx\",\n verbose=True,\n # store the trained parameter weights inside the model file\n input_names=['input'],\n output_names=['output'],\n dynamic_axes={'input': {0: 'bs'}, 'output': {0: 'bs'}})\n\nsess = ort.InferenceSession('dla_semantic.onnx')\ninput_name = sess.get_inputs()[0].name\no_output = sess.run(None, {input_name: img_np}) # list\n\npred = torch.max(output, 1)[1].squeeze(0).cpu().numpy()\npred = PALLETE[pred].astype('uint8')\nfused = cv2.addWeighted(pred, 0.2, img_origin.astype('uint8'), 0.8, gamma=0)\ncv2.imshow(f'net result', fused)\ncv2.waitKey()\n\npred = np.argmax(o_output[0], axis=1)[0]\npred = PALLETE[pred].astype('uint8')\nfused = cv2.addWeighted(pred, 0.2, img_origin.astype('uint8'), 0.8, gamma=0)\ncv2.imshow(f'onnx result', fused)\ncv2.waitKey()\n" }, { "alpha_fraction": 0.45928463339805603, "alphanum_fraction": 0.5232115387916565, "avg_line_length": 37.08695602416992, "blob_id": "d8ee32a256c7bace00d332d6946ea5919847599d", "content_id": "6d3527f3f90198e662a5f44ba9cca7edc7bfea1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2628, "license_type": "no_license", "max_line_length": 119, "num_lines": 69, "path": "/utils/config.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport os\nimport numpy as np\nfrom utils.data_transforms import voc_train_aug, voc_val_aug, voc_detect_aug\nfrom utils.data_transforms import cityscapes_train_aug, cityscapes_val_aug\nfrom utils.data_transforms import building_train_aug, building_val_aug, building_detect_aug\n\nPASCAL_CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')\n\nos.makedirs('weights', exist_ok=True)\nos.makedirs('images', 
exist_ok=True)\nos.makedirs('results', exist_ok=True)\nos.makedirs('tensorboard_log', exist_ok=True)\n\nPALLETE = np.array([[0, 0, 0], [244, 0, 232], [20, 50, 170], [62, 102, 156],\n [190, 153, 153], [153, 153, 253], [250, 170, 30], [180, 220, 0],\n [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],\n [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],\n [0, 0, 230], [119, 11, 32], [40, 50, 140], [38, 19, 106]], dtype=np.uint8)\n\n\nclass Config:\n def __init__(self, args, mode):\n self.mode = mode\n\n for k, v in args.items():\n self.__setattr__(k, v)\n\n if self.mode == 'Train':\n self.momentum = 0.9\n self.decay = 0.0001\n\n if self.dataset == 'voc2012':\n self.class_num = 21\n if self.mode == 'Train':\n self.aug = voc_train_aug\n elif self.mode == 'Val':\n self.aug = voc_val_aug\n elif self.mode == 'Detect':\n self.aug = voc_detect_aug\n\n if self.dataset == 'cityscapes':\n self.class_num = 19\n self.aug = cityscapes_train_aug if self.mode == 'Train' else cityscapes_val_aug\n\n if self.dataset == 'buildings':\n self.class_num = 2\n if self.mode == 'Train':\n self.aug = building_train_aug\n elif self.mode == 'Val':\n self.aug = building_val_aug\n elif self.mode == 'Detect':\n self.aug = building_detect_aug\n\n def to_val_aug(self):\n if self.dataset == 'voc2012':\n self.aug = voc_val_aug\n elif self.dataset == 'cityscapes':\n self.aug = cityscapes_val_aug\n elif self.dataset == 'buildings':\n self.aug = building_val_aug\n\n def show_config(self):\n print('\\n' + '-' * 30 + f'{self.mode} cfg' + '-' * 30)\n for k, v in self.__dict__.items():\n print(f'{k}: {v}')\n print()\n" }, { "alpha_fraction": 0.6071006059646606, "alphanum_fraction": 0.6193293929100037, "avg_line_length": 37.40909194946289, "blob_id": "d6a34dc04e9b7c7451c709a6f8e9d7f8b553ec41", "content_id": "42f3aeef053e2943b85c61bb4522607acd34fe03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2535, "license_type": 
"no_license", "max_line_length": 114, "num_lines": 66, "path": "/detect.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport torch\nimport argparse\nfrom utils.dataset import Seg_dataset\nimport cv2\nimport time\nfrom utils import timer\nfrom utils.config import Config, PALLETE\nfrom models.dla_up import DLASeg\n\nparser = argparse.ArgumentParser(description='Detection script for DLA Semantic Segmentation.')\nparser.add_argument('--trained_model', type=str, default='', help='Path to the trained model')\nparser.add_argument('--model', type=str, default='dla34', help='The model structure.')\nparser.add_argument('--dataset', type=str, default='buildings', help='The dataset for validation.')\nparser.add_argument('--colorful', default=False, action='store_true', help='Whether to show the colorful result.')\nparser.add_argument('--overlay', default=False, action='store_true', help='Whether to show the overlay result.')\nparser.add_argument('--use_dcn', default=False, action='store_true', help='Whether to use DCN.')\n\nargs = parser.parse_args()\ncfg = Config(args=args.__dict__, mode='Detect')\ncfg.show_config()\n\ntest_dataset = Seg_dataset(cfg)\n\nmodel = DLASeg(cfg).cuda()\nmodel.load_state_dict(torch.load(cfg.trained_model), strict=True)\nmodel.eval()\n\ntimer.reset()\nwith torch.no_grad():\n for i, (data_tuple, img_name) in enumerate(test_dataset):\n if i == 1:\n timer.start() # timer does not timing for the first image.\n\n img_name = img_name.replace('tif', 'png')\n image = data_tuple[0].unsqueeze(0).cuda().detach()\n\n with timer.counter('forward'):\n output = model(image)\n\n with timer.counter('save result'):\n pred = torch.max(output, 1)[1].squeeze(0).cpu().numpy()\n\n if cfg.colorful:\n pred = PALLETE[pred].astype('uint8')\n cv2.imwrite(f'results/{img_name}', pred)\n if cfg.overlay:\n pred = PALLETE[pred].astype('uint8')\n original_img = data_tuple[1].astype('uint8')\n fused = 
cv2.addWeighted(pred, 0.2, original_img, 0.8, gamma=0)\n cv2.imwrite(f'results/{img_name}', fused)\n else:\n pred *= int(255 / cfg.class_num)\n cv2.imwrite(f'results/{img_name}', pred)\n\n time_this = time.time()\n if i > 0:\n batch_time = time_this - time_last\n timer.add_batch_time(batch_time)\n t_f = timer.get_times(['forward'])\n fps = 1 / t_f[0]\n print(f'\\r{i + 1}/{len(test_dataset)}, fps: {fps:.2f}', end='')\n time_last = time_this\n\nprint()\n" }, { "alpha_fraction": 0.6116462349891663, "alphanum_fraction": 0.6221541166305542, "avg_line_length": 34.968502044677734, "blob_id": "9903b7efb05a51e378344cb098cb35a77e71c937", "content_id": "b8fd3149b1b7c0db840e1a8240c3f1ce2563f65d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4568, "license_type": "no_license", "max_line_length": 113, "num_lines": 127, "path": "/train.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "import argparse\nfrom tensorboardX import SummaryWriter\nimport time\nimport datetime\nfrom val import validate\nimport torch\nimport torch.utils.data as data\nfrom torch import nn\nfrom utils.dataset import Seg_dataset\nfrom utils.utils import *\nfrom models.dla_up import DLASeg\nfrom models.unet import UNet\nfrom utils.config import Config\nfrom utils.radam import RAdam\nfrom utils import timer\n\nparser = argparse.ArgumentParser(description='Training script for DLA Semantic Segmentation.')\nparser.add_argument('--model', type=str, default='unet', help='The model structure.')\nparser.add_argument('--dataset', type=str, default='buildings', help='The dataset for training.')\nparser.add_argument('--bs', type=int, default=16, help='The training batch size.')\nparser.add_argument('--iter', type=int, default=30000, help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')\nparser.add_argument('--resume', type=str, default=None, help='The path of the latest 
checkpoint.')\nparser.add_argument('--lr_mode', type=str, default='poly', help='The learning rate decay strategy.')\nparser.add_argument('--use_dcn', default=False, action='store_true', help='Whether to use DCN.')\nparser.add_argument('--val_interval', type=int, default=500, help='The validation interval during training.')\nparser.add_argument('--optim', type=str, default='sgd', help='The training optimizer.')\nargs = parser.parse_args()\n\ncfg = Config(args=args.__dict__, mode='Train')\ncfg.show_config()\n\ntorch.backends.cudnn.benchmark = True\n\ntrain_dataset = Seg_dataset(cfg)\ntrain_loader = data.DataLoader(train_dataset, batch_size=cfg.bs, shuffle=True,\n num_workers=8, pin_memory=True, drop_last=False)\n\nif cfg.model == 'unet':\n model = UNet(input_channels=3).cuda()\n model.apply(model.weights_init_normal)\nelse:\n model = DLASeg(cfg).cuda()\nmodel.train()\n\nif cfg.resume:\n resume_epoch = int(cfg.resume.split('.')[0].split('_')[1]) + 1\n model.load_state_dict(torch.load('weights/' + cfg.resume), strict=True)\n print(f'Resume training with \\'{cfg.resume}\\'.')\nelse:\n resume_epoch = 0\n print('Training with ImageNet pre-trained weights.')\n\ncriterion = nn.CrossEntropyLoss(ignore_index=255).cuda()\nif cfg.optim == 'sgd':\n optimizer = torch.optim.SGD(model.optim_parameters(), cfg.lr, cfg.momentum, weight_decay=cfg.decay)\nelif cfg.optim == 'radam':\n optimizer = RAdam(model.optim_parameters(), lr=cfg.lr, weight_decay=cfg.decay)\n\niter_time = 0\ntimer.reset()\nwriter = SummaryWriter(f'tensorboard_log/{cfg.dataset}_{cfg.model}_{cfg.lr}')\n\ni = 0\ntraining = True\nwhile training:\n for img, label in train_loader:\n if i == 1:\n timer.start()\n\n lr = adjust_lr_iter(cfg, optimizer, i)\n\n img = img.cuda().detach()\n target = label.cuda().detach()\n\n with timer.counter('forward'):\n output = model(img)\n\n with timer.counter('loss'):\n loss = criterion(output, target)\n\n with timer.counter('backward'):\n optimizer.zero_grad()\n loss.backward()\n\n with 
timer.counter('update'):\n optimizer.step()\n\n time_this = time.time()\n if i > 0:\n batch_time = time_this - time_last\n timer.add_batch_time(batch_time)\n time_last = time_this\n\n if i > 0 and i % 10 == 0:\n time_name = ['batch', 'data', 'forward', 'loss', 'backward', 'update']\n t_t, t_d, t_f, t_l, t_b, t_u = timer.get_times(time_name)\n\n seconds = (cfg.iter - i) * t_t\n eta = str(datetime.timedelta(seconds=seconds)).split('.')[0]\n\n print(f'{i:3d} | loss: {loss:.4f} | t_total: {t_t:.3f} | t_data: {t_d:.3f} | t_forward: {t_f:.3f} | '\n f't_loss: {t_l:.3f} | t_backward: {t_b:.3f} | t_update: {t_u:.3f} | lr: {lr:.5f} | ETA: {eta}')\n\n if i > 0 and i % 100 == 0:\n writer.add_scalar('loss', loss, global_step=i)\n\n i += 1\n if i > cfg.iter:\n training = False\n\n if cfg.val_interval > 0 and i % cfg.val_interval == 0:\n save_name = f'{cfg.model}_{i}_{cfg.lr}.pth'\n torch.save(model.state_dict(), f'weights/{save_name}')\n print(f'Model saved as: {save_name}, begin validating.')\n timer.reset()\n\n cfg.mode = 'Val'\n cfg.to_val_aug()\n model.eval()\n miou = validate(model, cfg)\n model.train()\n\n writer.add_scalar('miou', miou, global_step=i)\n timer.start()\n\nwriter.close()\n" }, { "alpha_fraction": 0.5744069218635559, "alphanum_fraction": 0.5837526917457581, "avg_line_length": 39.31884002685547, "blob_id": "189bc59abb0641cdfa66c28956b4c7d3189fbc28", "content_id": "b5e00292946ebca8d6778fbd02e4a487b9e0e99d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2782, "license_type": "no_license", "max_line_length": 106, "num_lines": 69, "path": "/val.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom utils.dataset import Seg_dataset\nimport argparse\nfrom models.dla_up import DLASeg\nfrom utils.config import Config\nfrom utils.utils import confusion_matrix, 
per_class_iou\n\nparser = argparse.ArgumentParser(description='Validation script for DLA Semantic Segmentation.')\nparser.add_argument('--trained_model', default='', type=str, help='path to the trained model')\nparser.add_argument('--model', type=str, default='dla34', help='The model structure.')\nparser.add_argument('--dataset', type=str, default='buildings', help='The dataset for validation.')\nparser.add_argument('--use_dcn', default=False, action='store_true', help='Whether to use DCN.')\nparser.add_argument('--onnx', default=False, action='store_true', help='Get onnx model.')\n\n\ndef validate(model, cfg):\n torch.backends.cudnn.benchmark = True\n\n val_dataset = Seg_dataset(cfg)\n val_loader = data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)\n\n total_batch = int(len(val_dataset)) + 1\n hist = np.zeros((cfg.class_num, cfg.class_num))\n\n with torch.no_grad():\n for i, (img, label) in enumerate(val_loader):\n image = img.cuda().detach()\n output = model(image)\n pred = torch.max(output, 1)[1].cpu().numpy().astype('int32')\n label = label.numpy().astype('int32')\n\n hist += confusion_matrix(pred.flatten(), label.flatten(), cfg.class_num)\n ious = per_class_iou(hist) * 100\n miou = np.nanmean(ious)\n print(f'\\rBatch: {i + 1}/{total_batch}, mIOU: {miou:.2f}', end='')\n\n print('\\nPer class iou:')\n for i, iou in enumerate(ious):\n print(f'{i}: {iou:.2f}')\n\n return miou\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n cfg = Config(args=args.__dict__, mode='Val')\n cfg.show_config()\n\n model = DLASeg(cfg).cuda()\n model.load_state_dict(torch.load(cfg.trained_model), strict=True)\n model.eval()\n if cfg.onnx:\n net_in = torch.randn(4, 3, 128, 128, requires_grad=True).cuda()\n torch_out = torch.onnx.export(model, # model being run\n net_in, # model input (or a tuple for multiple inputs)\n \"dla.onnx\",\n verbose=True,\n # store the trained parameter weights inside the model file\n training=False,\n 
do_constant_folding=True,\n input_names=['input'],\n output_names=['output'])\n exit()\n\n validate(model, cfg)\n" }, { "alpha_fraction": 0.5648037791252136, "alphanum_fraction": 0.5945303440093994, "avg_line_length": 27.03333282470703, "blob_id": "818f6be8148b4019eeb822f428b81bf014c6e581", "content_id": "77ad98604f1bae4b4de50834d63e3c3a7a72dcce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "no_license", "max_line_length": 97, "num_lines": 30, "path": "/utils/utils.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport numpy as np\n\n\ndef adjust_lr_iter(cfg, optimizer, cur_iter):\n lr = cfg.lr * (1 - cur_iter / cfg.iter) ** 0.9\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr\n\n\ndef accuracy(output, target): # acc = TP / (TP + FP)\n _, pred = output.max(1)\n pred = pred.view(1, -1)\n target = target.view(1, -1)\n correct = pred.eq(target)\n correct = correct[target != 255].view(-1)\n score = correct.float().sum(0) / correct.size(0) * 100.0\n return score.item()\n\n\ndef confusion_matrix(pred, label, n):\n k = (label >= 0) & (label < n)\n return np.bincount(n * label[k] + pred[k], minlength=n ** 2).reshape(n, n)\n\n\ndef per_class_iou(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + np.finfo(np.float32).eps)\n" }, { "alpha_fraction": 0.4693492650985718, "alphanum_fraction": 0.5337944030761719, "avg_line_length": 31.294416427612305, "blob_id": "b18254fa896ef765f2aa8ac8c8e6f0ae16e675c0", "content_id": "1f78f9158ba7a7965f85a544d1209871da93b1c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6500, "license_type": "no_license", "max_line_length": 105, "num_lines": 197, "path": "/scripts/label_voc.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env 
python \n# -*- coding:utf-8 -*-\nimport os\nimport cv2\nimport numpy as np\nimport json\n\n\ndef initial():\n global img_path, img_name, label_path, img, label, trail, ori_pixels, \\\n img_copy, total_mask, instance_dict, instance_pixels, class_index\n\n img_path = imgs.pop(0).split()[0]\n img_name = img_path.split('/')[-1]\n print('\\n', img_name)\n label_path = '/home/feiyuhuahuo/Data/label_imgs/Train/' + img_name.replace('jpg', 'png')\n\n img = cv2.imread(img_path)\n label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)\n\n trail = [] # for drawing masks\n ori_pixels = [] # for undoing trails\n class_index = 0\n instance_dict = {} # for drawing instance labels\n instance_pixels = []\n img_copy = img.copy()\n total_mask = np.zeros((img.shape[0], img.shape[1]), dtype='uint8')\n\n\ndef on_mouse(event, x, y, flags, param):\n global trail, ori_pixels, img_copy\n\n if event == cv2.EVENT_MOUSEMOVE:\n if flags == cv2.EVENT_FLAG_CTRLKEY:\n if len(ori_pixels) == 0:\n ori_pixels.append([(y, x), img[y, x, :].copy()])\n\n elif (y, x) != ori_pixels[-1][0]:\n ori_pixels.append([(y, x), img[y, x, :].copy()]) # ่ฎฐๅฝ•่ฝจ่ฟนๅๆ ‡ๅ’Œๅฏนๅบ”ๅƒ็ด ๅ€ผ๏ผŒ็”จไบŽๅ›žๆ’ค่ฝจ่ฟน\n trail.append([x, y]) # ่ฎฐๅฝ•่ฝจ่ฟนๅๆ ‡, ็”จไบŽๅกซๅ……mask\n\n img[y, x, :] = (0, 0, 255)\n img_copy = img # img_copy should track the operation on the original img\n\n\ndef draw_mask(category):\n global trail, total_mask, class_index, r_mark\n\n class_index = category\n\n if trail:\n instance_pixels.append(trail)\n\n trail = np.array(trail)\n cv2.fillPoly(total_mask, [trail], (category, category, category))\n\n cur_mask = np.zeros((img.shape[0], img.shape[1]), dtype='uint8')\n cv2.fillPoly(cur_mask, [trail], (255, 255, 255))\n contours, hierarchy = cv2.findContours(cur_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(img, contours, -1, PALLETE[category].tolist(), thickness=1)\n\n trail = []\n else:\n print('No new trail.')\n\n\nvoc_labels = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 
'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow', 'dog', 'horse', 'motorbike',\n 'person', 'sheep', 'sofa', 'train', 'tvmonitor']\n\nPALLETE = np.array([[0, 0, 0], [244, 35, 232], [70, 70, 70], [102, 102, 156],\n [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],\n [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],\n [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],\n [0, 0, 230], [119, 11, 32]], dtype=np.uint8)\nPALLETE_with_255 = np.tile(PALLETE, (13, 1))\nfinal_255 = np.array([[255, 255, 255], [255, 255, 255], [255, 255, 255],\n [255, 255, 255], [255, 255, 255], [255, 255, 255],\n [255, 255, 255], [255, 255, 255], [255, 255, 255]])\nPALLETE_with_255 = np.concatenate((PALLETE_with_255, final_255), axis=0)\n\nfolder = 1\nwith open(f'/home/feiyuhuahuo/Data/{folder}/todo.txt') as f:\n imgs = f.readlines()\n\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\ncv2.resizeWindow(\"image\", 960, 700)\ncv2.setMouseCallback('image', on_mouse)\n\nsemantic_label_folder = f'/home/feiyuhuahuo/Data/{folder}/semantic_label/'\ninstance_label_folder = f'/home/feiyuhuahuo/Data/{folder}/instance_label/'\nif not os.path.exists(semantic_label_folder):\n os.mkdir(semantic_label_folder)\nif not os.path.exists(instance_label_folder):\n os.mkdir(instance_label_folder)\n\ninitial()\n\nwhile True:\n cv2.imshow('image', img)\n k = cv2.waitKey(50)\n\n if k == 101: # ๆŒ‰Eๆ˜พ็คบๅŽŸๆ ‡ๆณจ\n img = PALLETE_with_255[label].astype('uint8')\n\n if k == 119: # ๆŒ‰Wๆ˜พ็คบๅทฒๆ ‡ๆณจ็š„mask\n img = PALLETE_with_255[total_mask].astype('uint8')\n\n if k == 113: # ๆŒ‰Qๆ˜พ็คบๅŽŸๅ›พ\n img = img_copy\n\n if k == 102: # ๆŒ‰Fไฟๅญ˜ๅนถ่ฟ›ๅ…ฅไธ‹ไธ€ๅผ ๅ›พ\n print(f'{len(instance_dict)} instances totally.')\n\n with open(f'/home/feiyuhuahuo/Data/{folder}/todo.txt', 'w') as f:\n f.writelines(imgs)\n\n if instance_dict:\n # save semantic labels, must be 'png' format.\n cv2.imwrite(semantic_label_folder + img_name.replace('jpg', 'png'), total_mask)\n # save instance labels\n json_path 
= instance_label_folder + img_name.replace('jpg', 'json')\n if os.path.exists(json_path):\n os.remove(json_path)\n\n with open(json_path, 'w', encoding='utf-8') as f:\n json.dump(instance_dict, f)\n else:\n print('Pass this picture.')\n\n initial()\n\n if k == 100: # ๆŒ‰Dๅ›žๆ’ค3ไธช็‚น\n try:\n for i in range(3):\n last_pixel = ori_pixels.pop()\n trail.pop()\n img[last_pixel[0][0], last_pixel[0][1], :] = last_pixel[1]\n except:\n pass\n\n if k == 114: # ๆŒ‰Rๅญ˜ๅ‚จๅฝ“ๅ‰ๅฎžไพ‹mask\n # get the instance number of the corresponding category\n if len(instance_dict):\n class_num = [aa for aa in instance_dict.keys() if f'{class_index}-' in aa]\n if class_num:\n class_num = [int(aa.split('-')[-1]) for aa in class_num]\n class_num.sort()\n class_num = class_num[-1] + 1\n else:\n class_num = 1\n else:\n class_num = 1\n\n instance_dict[f'{class_index}-{class_num}'] = instance_pixels\n print(f'{list(instance_dict)[-1]}, {voc_labels[class_index]}, {len(instance_pixels)} parts, OK.')\n instance_pixels = []\n\n if k == 53: # 5\n draw_mask(1)\n if k == 54: # 6\n draw_mask(2)\n if k == 55: # 7\n draw_mask(3)\n if k == 56: # 8\n draw_mask(4)\n if k == 57: # 9\n draw_mask(5)\n if k == 116: # t\n draw_mask(6)\n if k == 121: # y\n draw_mask(7)\n if k == 117: # u\n draw_mask(8)\n if k == 105: # i\n draw_mask(9)\n if k == 111: # o\n draw_mask(10)\n if k == 103: # g\n draw_mask(11)\n if k == 104: # h\n draw_mask(12)\n if k == 106: # j\n draw_mask(13)\n if k == 107: # k\n draw_mask(14)\n if k == 108: # l\n draw_mask(15)\n if k == 98: # b\n draw_mask(16)\n if k == 110: # n\n draw_mask(17)\n if k == 109: # m\n draw_mask(18)\n\n if k == 27: # esc\n break\n" }, { "alpha_fraction": 0.6994219422340393, "alphanum_fraction": 0.7109826803207397, "avg_line_length": 16.299999237060547, "blob_id": "d1099bfd91fca960d9fe315b5cb553aff5a860fc", "content_id": "476d0c3d1b06dae8b2f9814d5e5ea59b2d404342", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
173, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/test~~.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport glob\nimport cv2\n\nimport torch\nprint(torch.__version__)\n\nprint(torch.version.cuda)\nprint(torch.backends.cudnn.version())\n" }, { "alpha_fraction": 0.563023030757904, "alphanum_fraction": 0.596925675868988, "avg_line_length": 28.87244987487793, "blob_id": "7be466e4732325001f97d5b64268725b6763275a", "content_id": "0196643453a48a2c010d035b5269d5620fa8e93c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11710, "license_type": "no_license", "max_line_length": 119, "num_lines": 392, "path": "/utils/data_transforms.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport cv2\nimport torch\n\n\ndef RandomScale(img, label, scale_range): # Keeping ratio scale along the image long side.\n img_h, img_w, _ = img.shape\n assert (img_h, img_w) == label.shape[:2], 'img.shape != label.shape in data_transforms.RandomScale'\n\n long_size = max(img_h, img_w)\n new_size = np.random.randint(scale_range[0], scale_range[1]) * 32\n ratio = new_size / long_size\n\n new_w = int(((img_w * ratio) // 32 + 1) * 32)\n new_h = int(((img_h * ratio) // 32 + 1) * 32)\n\n img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n\n return img, label\n\n\ndef cityscapes_crop(img, label, crop_range):\n crop_h = np.random.randint(crop_range[0], crop_range[1]) * 32\n crop_w = crop_h * 2\n\n img_h, img_w, _ = img.shape\n if crop_h < img_h:\n y0 = np.random.randint(0, img_h - crop_h)\n x0 = np.random.randint(0, img_w - crop_w)\n\n img = img[y0: y0 + crop_h, x0: x0 + crop_w, :]\n label = label[y0: y0 + crop_h, x0: x0 + crop_w]\n\n return img, label\n\n\ndef FixCrop(img, 
label):\n pad_size = 22 * 32\n crop_size = 512\n img_h, img_w, _ = img.shape\n\n pad_img = np.random.rand(pad_size, pad_size, 3) * 255 # pad to self.pad_size\n pad_label = np.ones((pad_size, pad_size)) * 255\n pad_img = pad_img.astype('float32')\n pad_label = pad_label.astype('float32')\n\n y0 = (pad_size - img_h) // 2\n x0 = (pad_size - img_w) // 2\n pad_img[y0: y0 + img_h, x0: x0 + img_w, :] = img\n pad_label[y0: y0 + img_h, x0: x0 + img_w] = label\n\n crop_y0 = np.random.randint(0, pad_size - crop_size) # crop to self.crop_size\n crop_x0 = np.random.randint(0, pad_size - crop_size)\n\n img = pad_img[crop_y0: crop_y0 + crop_size, crop_x0: crop_x0 + crop_size, :]\n label = pad_label[crop_y0: crop_y0 + crop_size, crop_x0: crop_x0 + crop_size]\n\n return img, label\n\n\ndef PadToSize(img, label):\n pad_h = 19 * 32\n pad_w = pad_h * 2\n img_h, img_w, _ = img.shape\n\n if img_h < pad_h:\n pad_img = np.random.rand(pad_h, pad_w, 3) * 255\n y0 = np.random.randint(0, pad_h - img_h)\n x0 = np.random.randint(0, pad_w - img_w)\n pad_img[y0: y0 + img_h, x0: x0 + img_w, :] = img\n\n assert (img_h, img_w) == label.shape[:2], 'img.shape != label.shape in data_transforms.PadToSize'\n pad_label = np.ones((pad_h, pad_w)) * 255\n pad_label[y0: y0 + img_h, x0: x0 + img_w] = label\n return pad_img, pad_label\n\n return img, label\n\n\ndef PadIfNeeded(img, label):\n pad_to = 512\n img_h, img_w, _ = img.shape\n long_size = max(img_h, img_w)\n\n ratio = pad_to / long_size\n\n new_w = int(img_w * ratio)\n new_h = int(img_h * ratio)\n\n img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n pad_img = np.zeros((pad_to, pad_to, 3)) + (123.675, 116.280, 103.530)\n pad_img = pad_img.astype('float32')\n pad_img[0: new_h, 0: new_w, :] = img\n\n label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n pad_label = np.ones((pad_to, pad_to)) * 255\n pad_label = pad_label.astype('float32')\n pad_label[0: new_h, 0: new_w] = label\n return pad_img, 
pad_label\n\n\ndef normalize(img):\n # for i in range(3):\n # img[:, :, i] = (img[:, :, i] - np.mean(img[:, :, i])) / np.std(img[:, :, i])\n img = (img - 128.) / 128.\n\n return img\n\n\ndef to_tensor(img, label=None, onnx_mode=False):\n img = np.transpose(img[..., (2, 1, 0)], (2, 0, 1)) # To RGB, to (C, H, W).\n if onnx_mode:\n return img[None, :]\n img = torch.tensor(img, dtype=torch.float32)\n if label is not None:\n label = torch.tensor(label, dtype=torch.int64) # Label must be int64 because of nn.NLLLoss.\n return img, label\n\n return img\n\n\ndef SpecifiedResize(img, label): # Keeping ratio resize with a specified length along the image long side.\n resize_long = 1088\n img_h, img_w, _ = img.shape\n assert img.shape[:2] == label.shape[:2], 'img.shape != label.shape in data_transforms.SpecifiedResize'\n\n long_size = max(img_h, img_w)\n ratio = resize_long / long_size\n\n new_w = int(((img_w * ratio) // 32 + 1) * 32)\n new_h = int(((img_h * ratio) // 32 + 1) * 32)\n\n img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n\n return img, label\n\n\ndef nearest_resize(img, label=None): # Keeping ratio resize to the nearest multiple of 32 according to the image size.\n if label is not None:\n assert img.shape[0:2] == label.shape[0:2], 'shape mismatch in data_transforms.nearest_resize'\n\n img_h, img_w, _ = img.shape\n new_w = int((img_w // 32 + 1) * 32)\n new_h = int((img_h // 32 + 1) * 32)\n img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n if label is not None:\n label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n return img, label\n\n return img\n\n\ndef building_crop(img, label):\n crop_ratio = random.uniform(0.9, 1.0)\n h, w, _ = img.shape\n crop_h, crop_w = int(h * crop_ratio), int(w * crop_ratio)\n left = random.randint(0, w - crop_w)\n up = random.randint(0, h - crop_h)\n\n crop_img = img[up: up + crop_h, left: left 
+ crop_w, :]\n crop_label = label[up: up + crop_h, left: left + crop_w]\n\n return crop_img, crop_label\n\n\ndef pad_to_square(img, label):\n h, w, _ = img.shape\n long_size = max(h, w)\n pad_img = np.random.rand(long_size, long_size, 3) * 255\n pad_img = pad_img.astype('float32')\n pad_label = np.ones((long_size, long_size), dtype='float32') * 255\n\n if h == w:\n return img, label\n if h < w:\n up = random.randint(0, w - h)\n pad_img[up: up + h, :, :] = img\n pad_label[up: up + h, :] = label\n if h > w:\n left = random.randint(0, h - w)\n pad_img[:, left: left + w, :] = img\n pad_label[:, left: left + w] = label\n\n return pad_img, pad_label\n\n\ndef random_contrast(img):\n alpha = random.uniform(0.8, 1.2)\n img *= alpha\n img = np.clip(img, 0., 255.)\n return img\n\n\ndef random_brightness(img):\n delta = random.uniform(-25, 25) # must between 0 ~ 255\n img += delta\n img = np.clip(img, 0., 255.)\n return img\n\n\ndef random_sharpening(img):\n if random.randint(0, 1):\n kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n img = cv2.filter2D(img, -1, kernel=kernel)\n img = np.clip(img, 0., 255.)\n return img\n\n\ndef random_blur(img):\n if random.randint(0, 1):\n size = random.choice((3, 5, 7))\n img = cv2.GaussianBlur(img, (size, size), 0)\n img = np.clip(img, 0., 255.)\n return img\n\n\ndef color_space(img, current, to):\n if current == 'BGR' and to == 'HSV':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n elif current == 'HSV' and to == 'BGR':\n img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n img = np.clip(img, 0., 255.)\n\n return img\n\n\ndef random_saturation(img):\n alpha = random.uniform(0.8, 1.2)\n img[:, :, 1] *= alpha\n return img\n\n\ndef random_hue(img):\n delta = 25.0\n assert 0.0 <= delta <= 360.0\n img[:, :, 0] += random.uniform(-delta, delta)\n img[:, :, 0][img[:, :, 0] > 360.0] -= 360.0\n img[:, :, 0][img[:, :, 0] < 0.0] += 360.0\n return img\n\n\ndef BGR_distortion(img):\n # random_contrast() and random_brightness() must be in 
front of some nonlinear operations\n # (e.g. random_saturation()), or they will not affect the normalize() operation.\n img = random_contrast(img)\n img = random_brightness(img)\n img = random_sharpening(img)\n img = random_blur(img)\n return img\n\n\ndef HSV_distortion(img):\n img = color_space(img, current='BGR', to='HSV')\n img = random_saturation(img) # Useless for grey images.\n img = random_hue(img) # Useless for grey images.\n img = color_space(img, current='HSV', to='BGR')\n return img\n\n\ndef color_distortion(img):\n if random.randint(0, 1):\n img = BGR_distortion(img)\n if random.randint(0, 1):\n img = HSV_distortion(img)\n\n return img\n\n\ndef random_flip(img, label, v_flip=False):\n # horizontal flip\n if random.randint(0, 1):\n img = cv2.flip(img, 1) # Don't use such 'image[:, ::-1]' code, may occur bugs.\n label = cv2.flip(label, 1)\n\n # vertical flip\n if v_flip and random.randint(0, 1):\n img = cv2.flip(img, 0)\n label = cv2.flip(label, 0)\n\n return img, label\n\n\ndef random_rotate(img, label, ninty_rotation=False):\n h, w, _ = img.shape\n # slight rotation first\n if random.randint(0, 1):\n angle = random.randint(-10, 10)\n\n if ninty_rotation:\n # 90 degrees rotation second\n angle += random.choice((0, 90, 180, 270))\n\n matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)\n img = cv2.warpAffine(img, matrix, (w, h), borderValue=(0, 0, 0))\n label = cv2.warpAffine(label, matrix, (w, h), flags=cv2.INTER_NEAREST, borderValue=(255, 255, 255))\n\n return img, label\n\n\ndef direct_resize(img, label, final_size):\n img = cv2.resize(img, (final_size, final_size), interpolation=cv2.INTER_LINEAR)\n label = cv2.resize(label, (final_size, final_size), interpolation=cv2.INTER_NEAREST)\n return img, label\n\n\ndef cityscapes_train_aug(img, label):\n img, label = RandomScale(img, label, (24, 40))\n img, label = cityscapes_crop(img, label, (10, 22))\n img, label = random_flip(img, label, v_flip=False)\n img = color_distortion(img) # 
color_distortion() should be in front of random_rotate()\n img, label = random_rotate(img, label, ninty_rotation=False)\n img, label = PadToSize(img, label)\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef cityscapes_val_aug(img, label):\n img, label = SpecifiedResize(img, label)\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef voc_train_aug(img, label):\n img, label = RandomScale(img, label, (12, 22))\n img, label = FixCrop(img, label)\n img, label = random_flip(img, label, v_flip=False)\n img = color_distortion(img)\n img, label = random_rotate(img, label, ninty_rotation=False)\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef voc_val_aug(img, label):\n img, label = PadIfNeeded(img, label)\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef voc_detect_aug(img):\n img = nearest_resize(img)\n img = normalize(img)\n img, label = to_tensor(img)\n\n return img\n\n\ndef building_train_aug(img, label):\n assert img.shape[:2] == label.shape[:2], 'img.shape != label.shape in data_transforms.building_train_aug'\n\n img, label = building_crop(img, label)\n img, label = pad_to_square(img, label)\n img = color_distortion(img)\n img, label = random_flip(img, label, v_flip=True)\n img, label = random_rotate(img, label, ninty_rotation=True)\n img, label = direct_resize(img, label, 128)\n # img = img.astype('uint8')\n # label = label.astype('uint8') * 100\n # cv2.imshow('aa', img)\n # cv2.imshow('bb', label)\n # cv2.waitKey()\n # exit()\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef building_val_aug(img, label):\n img, label = nearest_resize(img, label)\n img = normalize(img)\n img, label = to_tensor(img, label)\n\n return img, label\n\n\ndef building_detect_aug(img, onnx_mode=False):\n img = nearest_resize(img)\n original_img = img.copy()\n norm_img = normalize(img)\n 
norm_img = to_tensor(norm_img, onnx_mode=onnx_mode)\n return norm_img, original_img\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.7006173133850098, "avg_line_length": 31.399999618530273, "blob_id": "adeb7e724efb7923084d3d434d79eac9aea7a9dd", "content_id": "d471e178c4414c2aa68cfad6362ec5625257acb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "no_license", "max_line_length": 82, "num_lines": 10, "path": "/README.md", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "Code can run with PyTorch1.7 now, and DCNv2, ONNX are OK. \n\ncd DCNv2 \n./make.sh \n\npython train.py --model=dla34 --dataset=buildings --bs=16 --iter=30000 --lr=0.01 \npython val.py --trained_model=weights/dla34_40000_0.01.pth \npython detect.py --trained_model=weights/dla34_40000_0.01.pth --overlay \n\n![Example 1](7.png)\n" }, { "alpha_fraction": 0.5960784554481506, "alphanum_fraction": 0.6098039150238037, "avg_line_length": 39.26315689086914, "blob_id": "67a6c58c164e243110b8287742fd514207565f17", "content_id": "51ceffd8be236fb4a0603a2a52eb682b2a0c8c0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 110, "num_lines": 38, "path": "/utils/dataset.py", "repo_name": "YsY-gc/dla_semantic_seg", "src_encoding": "UTF-8", "text": "import cv2\nimport glob\nimport torch.utils.data as data\n\n\nclass Seg_dataset(data.Dataset):\n def __init__(self, cfg):\n self.aug = cfg.aug\n self.mode = cfg.mode\n file = 'Train' if self.mode == 'Train' else 'Val'\n\n if cfg.dataset == 'voc2012':\n self.original_imgs = glob.glob(f'/home/feiyu/Data/VOC2012/original_imgs/{file}/*.jpg')\n self.label_imgs = glob.glob(f'/home/feiyu/Data/VOC2012/label_imgs/{file}/*.png')\n if cfg.dataset == 'cityscapes':\n self.original_imgs = 
glob.glob(f'/home/feiyu/Data/cityscapes_semantic/original_imgs/{file}/*.png')\n self.label_imgs = glob.glob(f'/home/feiyu/Data/cityscapes_semantic/label_imgs/{file}/*.png')\n if cfg.dataset == 'buildings':\n self.original_imgs = glob.glob(f'/home/feiyu/Data/building_semantic/original_imgs/{file}/*.tif')\n self.label_imgs = glob.glob(f'/home/feiyu/Data/building_semantic/label_imgs/{file}/*.tif')\n\n self.original_imgs.sort()\n self.label_imgs.sort()\n\n print('Dataset initialized.')\n\n def __getitem__(self, index):\n img = cv2.imread(self.original_imgs[index]).astype('float32')\n label = cv2.imread(self.label_imgs[index], cv2.IMREAD_GRAYSCALE).astype('float32')\n\n if self.mode != 'Detect':\n return self.aug(img, label)\n else:\n img_name = self.original_imgs[index].split('/')[-1]\n return self.aug(img), img_name\n\n def __len__(self):\n return len(self.original_imgs)\n" } ]
11
EyeSwipe/EyeSwipe
https://github.com/EyeSwipe/EyeSwipe
a4b25ee0e0e69e7a2a78471f989b3cb070d0bafb
83ffc784a51dcaae05bfeb166d4e1dddb4d95dc5
8087827a2a2435f01131c54afbafcc64cabe6648
refs/heads/master
2022-05-16T03:19:20.692393
2022-04-15T21:28:34
2022-04-15T21:28:34
207,735,798
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.768734872341156, "alphanum_fraction": 0.7727639079093933, "avg_line_length": 46.769229888916016, "blob_id": "698468109d640d25a2a8a020fefa42d94712644d", "content_id": "5a09ee2147d7b2889913b89fe4feecdfcd243f3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1241, "license_type": "permissive", "max_line_length": 99, "num_lines": 26, "path": "/Sentences/Sources/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Sources\n\nThis directory contains the original version of various files or sets of data currently in use. One\nsuch .zip file cannot be uploaded to github, as it is above the 100MB size limit at time of\nwriting. It is still mentioned, however.\n\n##### Links\n\nHere are the original links to download the files, in order:\n\n* https://www.wordfrequency.info/free.asp\n* https://lexically.net/downloads/BNC_wordlists/e_lemma.txt\n* https://www.kaggle.com/datasnaek/youtube-new\n\n### Files\n\n* [5k_original.txt](5k_original.txt): This original list contains \"n't\" as a word. Because that's\n\tnot compatible with our procedure for generating the lexicon, that line is removed in the copy\n\tthat we use.\n* [lemmas_original.txt](lemmas_original.txt): A few different changes were made to the set of\n\tlemmas (notably: removing the header and adding contractions as full words), so the original is\n\tstill present here for comparison.\n* [USvideos.zip](USvideos.zip): This is a zipped CSV of the set of trending YouTube videos over a\n\tcertain time period. We took this from the \"youtube-new\" dataset linked above. My intuition was\n\tthat popular videos were more likely to have hand-written subtitles and better conversation as\n\topposed to random YouTube videos." 
}, { "alpha_fraction": 0.6092715263366699, "alphanum_fraction": 0.6622516512870789, "avg_line_length": 15.777777671813965, "blob_id": "7f84be4dba0e2d1003d5d05fbfdca94946e1d9a5", "content_id": "2fa6238cc78dfc8c9a7dab2122af744e9d721a4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 152, "license_type": "permissive", "max_line_length": 51, "num_lines": 9, "path": "/kaolin-eyeswipe-recorder/Server.swift", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "//\n// Server.swift\n// EyeSwipeRecorder\n//\n// Created by George Wang on 3/6/21.\n// Copyright ยฉ 2021 EyeSwipe. All rights reserved.\n//\n\nimport Foundation\n" }, { "alpha_fraction": 0.7549019455909729, "alphanum_fraction": 0.7549019455909729, "avg_line_length": 33, "blob_id": "94baed7b21f25623574a35de04f6c6065a2def2c", "content_id": "6d15af49ccc23b6e721a14651b976477ca494a6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "permissive", "max_line_length": 89, "num_lines": 3, "path": "/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# EyeSwipe\n\nMaking a proper README is a priority. The project roadmap is in [roadmap.md](roadmap.md).\n" }, { "alpha_fraction": 0.7119265794754028, "alphanum_fraction": 0.7119265794754028, "avg_line_length": 24.9761905670166, "blob_id": "4d7faa258c4c72fd7e59a0850ae2046822c53a84", "content_id": "e771bd431a4b262e91ac7e1567cc046619750e48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "permissive", "max_line_length": 97, "num_lines": 42, "path": "/Sentences/sets.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# provides a simple interface for getting the sentence associated with a given sentence id number\nimport os\nimport re\nimport json\n\nfrom . 
import sentence_consts as consts\n\n__all__ = ['from_id', 'as_labels', 'load_sentences', 'unload_sentences']\n\nsentences = None\n# preloads the sentences in memory so that we don't have to repeatedly read the file.\ndef load_sentences():\n\tglobal sentences\n\tif sentences is not None:\n\t\treturn\n\n\twith open(consts.cleaned_sentences) as f:\n\t\tsentences = f.read().split('\\n')\n\ndef unload_sentences():\n\tglobal sentences\n\tsentences = None\n\ndef from_id(id_num, remove_punctuation=True):\n\tload_sentences()\n\ts = sentences[id_num]\n\tunload_sentences()\n\n\tif remove_punctuation:\n\t\ts = re.sub('[^' + output_set_str + ']', '', s)\n\n\treturn s\n\n# '^' and '$' signify the start and end of the sequence, respectively\noutput_set_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ^$'\noutput_set = list(output_set_str)\nchar_dict = {}\nfor i, c in enumerate(output_set):\n\tchar_dict[c] = i\n\ndef as_labels(sentence):\n\treturn [char_dict[c] for c in list('^' + sentence + '$')]" }, { "alpha_fraction": 0.6997487545013428, "alphanum_fraction": 0.7035176157951355, "avg_line_length": 33.31034469604492, "blob_id": "8c4c1cc6905246190e39be2f8e86b2a793e0daf2", "content_id": "21ec2a27e84181454f9873ede576c2a34f693b51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3980, "license_type": "permissive", "max_line_length": 102, "num_lines": 116, "path": "/Convert (deprecated)/convert_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Assumes that all of the files in 'Import/' are video files that have already been renamed to a\n# format that we want them to be: e.g. '7.MOV', '21.mp4'\n#\n# we'll run into issues if there are any files in 'Import/' that are not video files.\n#\n#\n# USAGE:\n# This script exists to process raw video files, typically from 'Import/', and convert them to the\n# format that we want. 
In the process of converting new data, we update 'namespace.json' to avoid\n# any naming conflicts.\n#\n# The training data can also be reconstructed from the source video files in 'SourceData/', which\n# will not change 'namespace.json'\n#\n# Choosing between these options is done with an interactive interface, so it need not be specified\n# beforehand.\n\nimport os\nimport sys\nimport json\n\nimport cv2\nimport dlib\n\nfrom delayedinterrupt import DelayedInterrupt\nimport convert_helper as helper\n\n# import `consts.py` from the parent directory\nfrom os.path import abspath, join, dirname\nsys.path.append(abspath(join(dirname(abspath(__file__)), '..')))\n\nimport consts\n\n# Choose between rebuilding from source files or going without\n# the 'yes' == ... converts 'yes'/'no' to True/False\nfrom_source = 'y' == helper.query_yes_no('Rebuild from source data? (y/n): ',\n\t\t\t\t\t\t\t\t\t\t \"Please enter 'y' or 'n'. Rebuild? (y/n): \")\n\n# This is the directory that we'll get all of our video files from.\ndata_dir = consts.source_data_dir if from_source else consts.import_dir\n\nworking_directory_save = os.getcwd()\n\n# convert the videos to sets of frames\nvideos = os.listdir(data_dir)\nfor i, filename in enumerate(videos):\n\twith DelayedInterrupt() as delayed_interrupt:\n\t\thelper.write_flush(\"Working on video '{}' ({}/{})\".format(filename, i+1, len(videos)))\n\n\t\t# use `namespace` to rename the image to a unique name\n\t\tif not from_source:\n\t\t\tfilename = helper.rename_with_namespace(delayed_interrupt, filename)\n\t\t\tif filename is None:\n\t\t\t\tsys.exit()\n\n\t\t\thelper.write_flush(\"\\rWorking on video '{}' ({}/{}, renamed)\\n\".format(filename, i+1, len(videos)))\n\t\telse:\n\t\t\t# we're done now\n\t\t\thelper.write_flush('\\n')\n\n\t\t# Convert all of the images to files\n\t\tsubdir_name = filename.split('.')[0]\n\t\thelper.write_flush('\\tConverting to image sequence...')\n\t\thelper.convert_to_imgs(filename, subdir_name, 
data_dir)\n\t\thelper.write_flush(' Done\\n')\n\n\t\t# NOTE: We change our working directory here so that all of the operations are on relative\n\t\t# filepaths.\n\t\tos.chdir(os.path.join(data_dir, subdir_name))\n\n\t\t# do our image processing\n\n\t\thelper.write_flush(\"\\tFinding faces...\")\n\t\tfaces = helper.detector(dlib.load_rgb_image(\"1.jpg\"), 0)\n\t\thelper.write_flush(\" Done! Found {}.\\n\".format(len(faces)))\n\n\t\tif len(faces) != 1:\n\t\t\tprint(\"Cannot use video '{}', did not contain exactly one face.\".format(filename))\n\t\t\t \n\t\t\t# save the original video\n\t\t\tos.chdir(working_directory_save)\n\t\t\thelper.move(filename, consts.data_dir, consts.source_data_dir)\n\n\t\t\tcontinue\n\n\t\tface_box = faces[0].rect\n\n\t\tfiles = os.listdir('.')\n\t\tfor i, img_path in enumerate(files):\n\t\t\tpercent = 100 * i // len(files)\n\t\t\thelper.write_flush(\"\\r\\tProcessing images... {}%\".format(percent))\n\n\t\t\thelper.crop_image(i, img_path, face_box)\n\n\t\thelper.write_flush(\"\\r\\tProcessing images... Done!\\n\")\n\n\t\t# make a note of how many frames there were\n\t\twith open(consts.num_total_file, 'w+') as f:\n\t\t\tjson.dump(len(files), f)\n\n\t\t# instead of collecting the image files into videos, we'll leave them as is\n\t\t# we will, however, remove the other video files, though. We'll leave a simple text file to\n\t\t# tell us how many frames there are in the video\n\t\t#\n\t\t# because we're using 'files', we only get the original images, not the new ones\n\t\thelper.write_flush(\"\\tFinishing up... 
\")\n\t\tfor img_path in files:\n\t\t\tos.remove(img_path)\n\n\t\tos.chdir(working_directory_save)\n\n\t\t# move the directory to data\n\t\thelper.move(subdir_name, data_dir, consts.data_dir)\n\t\t# save the original video to source data\n\t\thelper.move(filename, data_dir, consts.source_data_dir)\n\t\thelper.write_flush(\" Done!\\n\")\n" }, { "alpha_fraction": 0.7764680981636047, "alphanum_fraction": 0.7821511030197144, "avg_line_length": 53.620689392089844, "blob_id": "6589a55c2d019c77dd5390660b842af4c6e84a5f", "content_id": "b6cf193c047e73ffe754ff35a3b64d8afa5e5e1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4751, "license_type": "permissive", "max_line_length": 118, "num_lines": 87, "path": "/Server/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Server\n\nThis directory contains all of the files necessary to run a simple HTTP server to connect with our\ndata-collection app.\n\nThere are still significant portions of this server left to build. The entire pipeline envisioned\nis as follows:\nThe app requests 1 (or more) sentences from the server to have the user swipe and uploads the video\nto the server. This could go in many orders (such as bulk sentence request, then bulk upload VS.\none at a time), but these are the two core functionalities.\n\nOther features that are nice to have are: individual user tracking, so we can put some individuals\nin our holdout set; storing video/user metadata to allow that input to the model.\n\nThis portion of the project is written in Go, so files are stored in idiomatic Go fashion. The only\nexception here is that `GOPATH` should be set to [this](.) directory (EyeSwipe/Server). 
That being\nsaid, it should be noted that no part of the executable requires a specific path relative to the\nrest of the project - 'sentences-link.txt' can be changed to allow the freedom to move the absolute\npath to this directory.\n\n### Files\n\n* **bin/sentences-link.txt**: A symbolic link to EyeSwipe/Sentencecs/sentences.txt\n* **bin/server-state.json**: A saved copy of the state of the server (not present here).\n\n\n-----------------------------------\n\nUpdate 2/23/21: Go Server refactored and completed. Several changes to server design and data pipeline\nwere made:\n\nNew data pipeline: The server has only two handler functions: getSentence and storeVideo. App users issue\na GET request to serverIP:8080/sentence/get and receive a random sentence from the sentence pool. Similarly, \nusers issue a POST request containing metadata and a video file to serverIP:8080/data/upload to store video.\nUpload requests are entirely decoupled from sentence-getting, and all necessary metadata including userID\nand the sentence itself are contained in the metadata of each upload request.\n\nThese two methods capture the core functionality of the server while retaining a simple design. Previous design\nassigned sentences to users based on an allocation system, while the new design simply chooses a random\nsentence from the sentence pool and assigns it. 
In practice there is a very small chance that a user will\nbe assigned the same sentence twice, and even if this happens there is no real downside to it.\n\nData storage is as follows:\n\nEyeSwipe/\n\tServer/\n\t\tsrc/\n\t\t\t...\n\t\ttesting/\n\t\t\t...\n\t\tdata/\n\t\t\tuserID1/\n\t\t\t\thash(sentence)/\n\t\t\t\t\t1.mov\n\t\t\t\t\tmetadata-1.txt\n\t\t\t\t\t2.mov\n\t\t\t\t\tmetadata-2.txt\n\t\t\tuserID2/\n\t\t\t\t...\n\nWithin the data directory, files are stored first according to userID, to make grouping by user convenient.\nAfter this, a video for a particular sentence is stored in its own directory, along with associated metadata.\nFor uniqueness the directory name is the hash of the sentence whose video is being stored. If a user has n \nvideos on file for a sentence, the videos and their associated metadata files are numbered 1-n and stored in \nthe same directory. UserID is chosen on the app end; a good choice would be to have users enter their emails,\nand permanently save userID as email or hash(email).\n\nDesign update #1: Server is now stateless. After the new data pipeline was envisioned, it became clear that\nserver management of userIDs, etc was unnecessary. All necessary data for both directions of data flow are \ndirectly included in HTTP requests, so the server does not need to remember any information about users or\nsentence allocation.\n\nDesign update #2: sentences.go renamed/refactored into io.go, and main method moved to run_server.go. With a stateless\ndesign, the MainContext struct became unnecessary and was replaced by the sentenceManager struct, which maintains\na slice containing all sentences in the sentence pool, and methods initialize(), which loads sentences.txt into the\nsentenceManager, and getSentence() which returns a random sentence from the pool. The rest of io.go contains methods\nto create and manage the file system described above. \n\nDesign update #3: math/rand replaced with crypto/rand. 
This is a minor point, but math/rand methods are deterministic\nacross different runs, so if the server is restarted more frequently than the sentences file is updated, there\nwill be a subset of sentences which are picked first repeatedly, leading to an uneven distribution of selected\nsentences.\n\nDesign update #4: Added a basic testing suite. Running go test after starting the server will query the server for\na sentence and print it, and then send a POST request containing testing/test.mov and test metadata and test\nfor the presence of ../data/testID/hash(testSentence)/1.mov, ../data/testID/hash(testSentence)/metadata1.txt and\ncheck their integrity." }, { "alpha_fraction": 0.7474191188812256, "alphanum_fraction": 0.7591190934181213, "avg_line_length": 49.13793182373047, "blob_id": "64c7fcc79c00160a7b0f8c9adffcd7a784fe0ffa", "content_id": "a86d75727003535139276899e5d5737105e0add4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1453, "license_type": "permissive", "max_line_length": 120, "num_lines": 29, "path": "/Sentences/Lexicon/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Lexicon\n\nThis directory contains the relevant files for constructing the lexicon ('lexicon.txt'). The\nlexicon was originally going to be used for the output layer for our model, but that is no longer\nthe case. It is currently being used to help with [parsing](../get_sentences_from_subtitles_script.py)\nYouTube subtitles into sentences.\n\nEventually, the lexicon will be used to create our language model to aid with the output\nprobabilities.\n\n### Creation\n\nThe lexicon is created as the product of two lists: the 5000 most frequently used English words,\nand a set of lexemes (word families) for a large number of English words (\\~14.8k). Word families\nare essentially the conjugations of a word (e.g. 'run' -> 'runs', 'ran', and 'running'). 
Our\nlexicon is then composed of each word in the 5k list, in addition to all other words in its lexeme.\n\nNote:\n\t\n* At the top of the list of lemmas are a few other words that were not originally present, mainly\n\tto include contractions. Other contractions were added in to the list manually. The\n\t[original list](../Generation/Sources/lemmas_original.txt) can be found in [Generation/Sources](../Generation/Sources).\n* '5k.txt' contains some duplicates. Those are accounted for when we create the lexicon.\n\n### Files\n\n* [5k.txt](5k.txt): A list of the 5000 most frequently used English words.\n* [lemmas.txt](lemmas.txt): A list of lexemes for the top \\~15k headwords.\n* [lexicon.txt](lexicon.txt): The lexicon." }, { "alpha_fraction": 0.6725440621376038, "alphanum_fraction": 0.6750629544258118, "avg_line_length": 19.35897445678711, "blob_id": "01358ee09259bd4f372bed2f5545e8fcfaed3a73", "content_id": "2637ad43f4a26140bbe586a5aafbef986467d5cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "permissive", "max_line_length": 63, "num_lines": 39, "path": "/Sentences/random_sentences_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport sys\nimport random\n\nimport sentence_consts as consts\n\nto_file = None\n\nif len(sys.argv) != 2:\n\tprint(\"Expected argument for number of sentences to get\")\n\tsys.exit()\n\t\nsys.stdout.write(\"Would you like to output to a file? (y/n): \")\nsys.stdout.flush()\nwhile True:\n\tresponse = sys.stdin.readline().strip()\n\n\tif response == 'y':\n\t\tto_file = True\n\t\tbreak\n\telif response == 'n':\n\t\tto_file = False\n\t\tbreak\n\n\tsys.stdout.write(\"Please enter 'y' or 'n'. To file? 
(y/n): \")\n\nwith open(consts.cleaned_sentences) as f:\n\tsentences = f.read().split('\\n')\n\nnum_get = int(sys.argv[1])\nrandom_set = random.sample(sentences, num_get)\nas_string = '\\n'.join(random_set)\n\nif to_file:\n\twith open(consts.random_sentence_list, 'w') as f:\n\t\tf.write(as_string + '\\n')\nelse:\n\tprint(as_string)\n" }, { "alpha_fraction": 0.8225806355476379, "alphanum_fraction": 0.8225806355476379, "avg_line_length": 45.75, "blob_id": "4768d574b5bc9d3463e067a1bfddbe53120aaf37", "content_id": "a3d1bdf0eb369878e9efe204448b126610b0f8ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "permissive", "max_line_length": 96, "num_lines": 4, "path": "/Checkpoints/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Checkpoints\n\nThis directory serves as a storage place for saved model checkpoints. The checkpoints themselves\nare left out of git, so this directory is intentionally left mostly empty." }, { "alpha_fraction": 0.6402502655982971, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 23, "blob_id": "92865214e848e0f2ea61b8ac39eb9e6b17e2972f", "content_id": "1e8b99a278d8e84a54df92ce790bf13ed50dcd64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "permissive", "max_line_length": 92, "num_lines": 40, "path": "/Sentences/Lexicon/generate_lexicon_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This script is only run once, to generate 'lexicon.txt', a vocabulary of ~11000 words. 
For\n# information on the usage of the lexicon, see this directory's README.\n\nword_dict = {}\nword_list = []\nwords = []\n\n# get a list of words, but with duplicates removed (as 5k.txt has a few duplicates)\nwith open(\"5k.txt\") as f:\n\tfor line in f:\n\t\tword = line.rstrip(\"\\n\")\n\n\t\tif word not in word_dict:\n\t\t\tword_dict[word] = None\n\t\t\tword_list.append(word)\n\nlemmas = {}\nwith open(\"lemmas.txt\") as f:\n\tfor line in f:\n\t\thalves = line.rstrip(\"\\n\").split(\" -> \")\n\n\t\tlemmas[halves[0]] = halves[1].split(\",\")\n\nall_word_dict = {}\nall_word_list = []\n\nfor word in word_list:\n\tif word not in all_word_dict:\n\t\tall_word_dict[word] = None\n\t\tall_word_list.append(word)\n\n\tif word in lemmas:\n\t\tfor w in lemmas[word]:\n\t\t\tif w not in all_word_dict:\n\t\t\t\tall_word_dict[w] = None\n\t\t\t\tall_word_list.append(w)\n\nwith open(\"lexicon.txt\", \"w\") as f:\n\tfor word in all_word_list:\n\t\tf.write(word + \"\\n\")" }, { "alpha_fraction": 0.6419558525085449, "alphanum_fraction": 0.64826500415802, "avg_line_length": 17.647058486938477, "blob_id": "abdfcf251b13c791d6782a02d58733fbcb4e351d", "content_id": "f5759bc0fd53c15791aa3ba6d1fb5d66dd0018ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 634, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/Server/src/run_server.go", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\t\"log\"\n\t\"io/fs\"\n)\n\nfunc main() {\n\tsentManager := &SentenceManager{}\n\tsentManager.loadSentences()\n\n\tinfo, err := os.Stat(\"../data\")\n\tif os.IsNotExist(err) {\n\t\tos.Mkdir(\"../data\", fs.ModeDir)\n\t\terr = nil\n\t}\n\tif (info != nil) && (!info.IsDir()) {\n\t\tpanic(\"Error: file 'data' exists and is not a directory\")\n\t} \n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\thttp.HandleFunc(\"/sentence/get\", 
sentManager.sentenceGetHandler)\n\thttp.HandleFunc(\"/data/upload\", videoUploadHandler)\n\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\treturn\n\t})\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n" }, { "alpha_fraction": 0.730042040348053, "alphanum_fraction": 0.7447478771209717, "avg_line_length": 57.306121826171875, "blob_id": "9fbe574fec050fb672feda73b343c16d2383c357", "content_id": "f258f797645c8662ed5df427a503bdfb22a17bf7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2856, "license_type": "permissive", "max_line_length": 126, "num_lines": 49, "path": "/Convert (deprecated)/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Convert\n\n**NOTE: DEPRECATED.** Below is the purpose it previously served.\n\nThis directory contains all of the relevant files used for creating training data from our source\nvideo files.\n\nThe basic data conversion process is fairly simple. Once data has been loaded into ['/Import'](../Import)\n(in the top-level directory), we rename the video and use ffmpeg to convert it into a series of\nimages. Dlib's face detection is used on the first frame to locate the general position of the face\nin the video, and the facial landmark detector is used to crop each image. SIGINT is delayed\nthroughout the majority of this process in order to ensure that we won't ever need to revert\nchanges once we've made them (e.g. renaming, creating subdirectories, etc.), or clean up partway\nthrough. The data that we're saving is the collection of cropped images.\n\nAfter we have a subdirectory -- usually within '/Import' of cropped images, we transfer that set to\n['/Data'](../Data), and move the original video file to ['/SourceData'](SourceData).\n\nThe other method for conversion instead rebuilds the contents of 'Data' from the files in\n'SourceData'. 
This can be selected with an interactive menu at runtime.\n\n### Files\n\n* [delayedinterrupt.py](delayedinterrupt.py): This file establishes a simple class to be used to\n\tprevent SIGINT from halting key parts of the conversion process -- allowing it to be terminated\n\tsafely at any time. More information can be found in the file.\n* [convert_helper.py](convert_helper.py): Just a simple helper file for 'convert_script.py'. It\n\ttakes care of most of the function, while the script itself displays the interaction.\n* [convert_script.py](convert_script.py): This is the main script for the video conversion process.\n\n##### Files not present\n\n* **namespace.json**: This file serves to record how many videos for each unique sentence ID have\n\talready been added to the dataset. This file is not included, as it is only useful in\n\tcombination with the dataset, which is not present in this repo.\n* **mmod_human_face_detector.dat**: Will be removed soon. This is one of dlib's supplied face\n\tdetectors. It does not come pre-installed with the rest of dlib, but can be downloaded\n\t[here](http://dlib.net/files/mmod_human_face_detector.dat.bz2)\n* **shape_predictor_68_face_landmarks.dat**: This will also be removed soon. 
This is a facial\n\tlandmarks detector supplied by dlib, and it can be downloaded\n\t[here](http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2).\n\n### An additional note, post-deprecation:\n\nBecause `namespace.json` is not present, and these scripts may not be run again, here is the last\nvalue of `namespace.json` with just a few videos all done by one person:\n```json\n{\"14\": 1, \"1\": 1, \"2\": 1, \"4\": 1, \"6\": 1, \"7\": 1, \"12\": 1, \"3\": 1, \"8\": 1, \"13\": 1, \"11\": 1, \"9\": 1, \"10\": 1, \"5\": 1, \"15\": 1}\n```" }, { "alpha_fraction": 0.538644552230835, "alphanum_fraction": 0.5542771220207214, "avg_line_length": 35.60529327392578, "blob_id": "9566df63b23b5f2a80de1794c77367113fc42f75", "content_id": "19d4f1e1a227ee4860ebbb5b5a89bf874badc374", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33204, "license_type": "permissive", "max_line_length": 123, "num_lines": 907, "path": "/Sentences/Generation/make_sentences_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport sys\nimport html\nimport datetime\nimport unicodedata\n\nimport videolist as vlist\nimport gen_consts as gconsts\n\n# import 'sentence_consts' from .. as 'sconsts'\nfrom os.path import abspath, join, dirname\nsys.path.append(abspath(join(dirname(abspath(__file__)), '..')))\nimport sentence_consts as sconsts\n\n# There are some subtitles that have been found through manually looking\n# at the set that appear to be complete garbage. 
That list is here:\n#\n# Reasons:\n# (0) gibberish (different language?)\n# (1) final timestamp is at 27 HOURS <-- WTF?\ngarbage_subs = {'1M5r_B1_WZ8', 'P6ODTQKhaXk'}\n\n# If True, this will instead output to a separate file, where we can\n# find the videoID associated with each sentence\ndebug = True\ndebug_file = join(dirname(abspath(__file__)), 'DEBUG_SENTENCES.txt')\n\n# load the lexicon, for later\nwith open(sconsts.lexicon_file) as f:\n lexicon = set(f.read().split('\\n'))\n\nfile = gconsts.all_sentences if not debug else debug_file\nif os.path.exists(file):\n os.remove(file)\n\ndef write_sentences(videoID, sentences):\n with open(file, 'a+') as f:\n if debug:\n f.write(videoID + '\\n')\n sentences = ['\\t' + s for s in sentences]\n f.write('\\n'.join(sentences) + '\\n')\n\n# ***********************************************************\n# NOTES ON FORMATTING\n# ***********************************************************\n\n# all subtitles start with the following:\n\"\"\"\nWEBVTT\nKind: captions\nLanguage: en\n\"\"\"\n# The rest of the file then looks like this:\n\"\"\"\n00:00:27.760 --> 00:00:30.129\nTrump's presidency\nis like one of his handshakes.\n\n00:00:30.229 --> 00:00:33.032\nIt pulls you in,\nwhether you like it or not.\n\n00:00:33.833 --> 00:00:36.402\nHe's had so many\nterrible moments this year,\n\n00:00:36.502 --> 00:00:38.137\nyou probably forgot about\nmany of them.\n\"\"\"\n# Taken from videoID: '1ZAPwfrtAFY'\n#\n# There are a few other formatting points:\n# * Subititles support html formatting (e.g. <i>foo</i>)\n# * As such, some html characters are escaped. The most common ones\n# are '&lt;' and '&gt;'\n# * ALL SUBTITLES END WITH TWO BLANK LINES. This is an important detail\n# * Styles differ across channels and videos, so here's a list of edge\n# cases that must be accounted for:\n# * Escaped phrases or descriptions, e.g. 
[Laughter]\n# * Rolling text -- each block contains overlap with the previous to\n# give the impression of text rolling up the screen\n# * Additional formatting on the first line of the blocks, such as:\n# 00:00:03.536 --> 00:00:04.303 align:start position:0%\n# * (Mostly) Empty lines within blocks (we can't just split by '\\n\\n')\n# * They won't be completely empty, as far as I can tell: they all\n# have -- at the very least -- spaces on those lines\n# * '>>' to denote a change in speaker\n# * '$Name: Text' or '($Name) Text' to denote who's talking\n# * Note: These two will sometimes be used in combination, but only\n# use a named switch for certain people, not others. For an\n# example, see this videoID: W7SZmMW6Ow0\n# * 'Cues', which prefix blocks with a number, starting at 1 and\n# counting up. Example:\n\"\"\"\n WEBVTT\n Kind: captions\n Language: en\n\n 1\n 00:00:00.000 --> 00:00:05.580\n [MUSIC]\n\n 2\n 00:00:05.580 --> 00:00:06.396\n That's not his briefcase.\n\"\"\"\n# * All-caps (sometimes even while $Name is normally cased)\n# ***********************************************************\n\nclass TextGroup():\n def __init__(self, lines, start_time, end_time):\n self.lines = lines\n self.start_time = start_time\n self.end_time = end_time\n\n def from_text(time_header, body):\n lines = body.split('\\n')\n\n # Get the time from the header. 
The header is guaranteed to be\n # formatted like:\n # 00:00:00.000 --> 00:00:05.580\n # even though they may be formatted differently in the raw text\n # of the subtitles themselves:\n # 00:00:03.536 --> 00:00:04.303 align:start position:0%\n\n start, end = time_header.split(\" --> \")\n\n time_fmt = \"%H:%M:%S.%f\"\n start_time = datetime.datetime.strptime(start, time_fmt)\n end_time = datetime.datetime.strptime(end, time_fmt)\n\n lines = [replace_html_escaped_chars(l) for l in lines]\n\n return TextGroup(lines, start_time, end_time)\n\n def time_diff(a, b):\n # returns the amount of time, in seconds, between the end of `a`\n # and the start of `b`, as a float.\n \n return (b.start_time - a.end_time).total_seconds()\n\n def join(a, b, merge_overlap=True):\n # combines `a` and `b` into one `TextGroup`.\n #\n # if merge_overlap is True (default), then it we'll treat the\n # overlapped region as expected -- it'll only appear once.\n #\n # NOTE: This does not take into account the time difference\n # between the two groups\n\n new_lines = []\n\n if merge_overlap:\n def non_empty_line(l):\n return re.match(r\"^\\s*$\", l) is None\n\n b.lines = list(filter(non_empty_line, b.lines))\n\n new_lines = a.lines + b.lines[TextGroup.overlap(a, b):]\n else:\n new_lines = a.lines + b.lines\n\n start = a.start_time\n end = b.end_time\n\n return TextGroup(new_lines, start, end)\n\n def overlap(a, b):\n # gives the number of lines that overlap between the two groups,\n # where `b` starts with the end of `a`\n #\n # I have only ever seen this done with Jimmy Kimmel's videos,\n # but he has reached the trending page an unfortunate number of\n # times.\n #\n # Here's an example videoID to take a look: eIp7PYuAu0k\n\n if len(a.lines) == 0 or len(b.lines) == 0:\n return 0\n\n # check if `a` contains the ending string of `b`\n if b.lines[0] not in a.lines:\n return 0\n\n # the starting line of the overlap\n start_index = a.lines.index(b.lines[0])\n\n # verify that the rest of the lines 
are the same\n overlap_size = 1\n for a_line, b_line in zip(a.lines[start_index+1:], b.lines):\n if a_line != b_line:\n return 0\n\n overlap_size += 1\n\n return overlap_size\n\n def overlaps(a, b):\n # Returns whether or not the two groups have any overlap,\n # irrespective of the time difference between them\n\n return TextGroup.overlap(a, b) != 0\n\n\ndef subtitle_uses_overlap(groups):\n # Determines whether or not the subtitles for a video overlap groups\n # in order to give a scrolling effect\n\n n_with_overlap = 0\n n_small_gap = 0\n for a, b in zip(groups, groups[1:]):\n if TextGroup.time_diff(a, b) < gconsts.subtitle_max_gap_time:\n n_small_gap += 1\n if TextGroup.overlaps(a, b):\n n_with_overlap += 1\n\n if n_small_gap == 0:\n return False\n\n fraction_with_overlap = n_with_overlap / n_small_gap\n \n return fraction_with_overlap >= gconsts.subtitle_min_fraction\n\ndef read_groups(videoID):\n # returns a list of `TextGroup`s corresponding to the videoID. If no\n # file is found, then it returns None\n\n # get the file for the videoID. The file format is:\n # $VIDEOID.en.vtt\n # but we're using gen_consts.ext to cover for the last bit\n filepath = os.path.join(gconsts.subtitles_dir, \"{}.{}\".format(videoID, gconsts.ext))\n if not os.path.exists(filepath):\n return None\n\n with open(filepath) as f:\n # The first block is the header, which looks something like this:\n \"\"\"\n WEBVTT\n Kind: captions\n Language: en\n \"\"\"\n # So we don't include the first block.\n #\n # We'd like to be able to split by '\\n\\n', but ONE subtitle\n # doesn't adhere to this format. 
SO, we'll use a more complex\n # solution that doesn't rely on a usually-adhered-to style for\n # writing the subtitles, and instead use the specification of\n # the format itself\n\n time_fmt = r\"[0-9]{2}:[0-9]{2}:[0-9]{2}\\.[0-9]{3}\"\n time_line = r\"({} --> {})[^\\n]*\\n\".format(time_fmt, time_fmt)\n\n pattern = r\"(\\n\\d+)?\\n{}(([^\\n]*\\n?)*?)(?=\\n(\\d+\\n)?{}|$)\"\n\n # pattern = r\"(?:(?:\\n\\d+)?\\n){}((?:[^\\n]*\\n)*?)(?=({})|$)\"\n pattern = pattern.format(time_line, time_line)\n\n blocks = re.finditer(pattern, f.read())\n\n # Group 2 is the first time range, group 3 is the body of text\n blocks = list(map(lambda m: (m.group(2), m.group(3)), blocks))\n\n return [TextGroup.from_text(b[0], b[1]) for b in blocks]\n\ndef replace_html_escaped_chars(s):\n # remove html tags\n s = re.sub(r\"<.+?>\", \"\", s)\n s = re.sub(r\"</.+?>\", \"\", s)\n\n # Replace the various escaped characters\n return html.unescape(s)\n\ndef preprocess_groups(groups):\n # Takes a list of `TextGroup`s and does a few key things:\n # * If the subtitles are formatted to have overlaps\n # (see list above), groups that overlap are combined\n # * Returns a list of time groups. We're defining time groups as\n # sets of sequential subtitle groups that have are effectively\n # continuous, i.e. there is no gap greater than the maximum gap\n # time, as defined in 'gen_consts.py'\n # * The list that is returned is a list of lists of lines, where\n # we have concatenated the lists of lines within the indiviual\n # subtitle groups into one time group.\n \n if len(groups) == 0:\n return []\n\n # Distinct groupings, determined by the amount of time between\n # groups. 
Any gap greater than `gconsts.subtitle_max_gap_time` will\n # be used to indicate a different time group.\n time_groups = [groups[0]]\n \n for g in groups[1:]:\n last_group = time_groups[-1]\n\n if TextGroup.time_diff(last_group, g) > gconsts.subtitle_max_gap_time:\n time_groups.append(g)\n continue\n\n time_groups[-1] = TextGroup.join(last_group, g)\n\n return time_groups\n\n# charset = r\"a-zA-Z0-9.\\'\\\"\\-,;:?! \"\n\ndef format_line(line, urls=None):\n # Formats a line for use in to_sentences()\n #\n # This function only exists to separate certain parts of\n # `to_sentences()`, so many pieces of this function only make sense\n # when understood within the context of the other.\n #\n # <speaker> will always indicate a new speaker (but will not catch\n # all of them)\n #\n # <char> indicates a character that is outside of our character set\n #\n # PLEASE NOTE: Quotations are not removed -- that will be done later\n # because they often cross lines.\n\n # Do a couple standardizing things -- both with whitespace and\n # dashes. All whitespace is converted to a single space, and all\n # dash-like characters are converted to '-'\n l = re.sub(r\"\\s+\", \" \", line)\n # left side is en dash, right side is em dash\n l = re.sub(r\"(โ€“|โ€”)\", \"-\", l)\n\n # remove accents -- borrowed from a stackoverflow post:\n # https://stackoverflow.com/a/31607735\n # l = unicode(l)\n l = unicodedata.normalize('NFD', l)\n l = l.encode('ascii', 'ignore')\n l = l.decode('utf-8')\n\n # Remove and store any urls. 
This pattern is heavily inspired by the\n # one found at https://stackoverflow.com/a/3809435\n # This one isn't perfect, and needed to be modified slightly to\n # avoid matching on acronyms, but it should be good.\n url_pattern = r\"([-a-zA-Z0-9]+:(//)?)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{2,6}\\b([-a-zA-Z0-9()@:%_+.~#?&/=]*)\"\n def replace_url(match):\n if urls is None:\n return \"<url>\"\n else:\n # group 0 is the entire string\n index = len(urls)\n urls.append(match.group(0))\n return \"<url-{}>\".format(index)\n l = re.sub(url_pattern, replace_url, l)\n\n # We'll do a small thing here that should aid in readability.\n # Because percentages often follow a predictable format, we'll\n # replace them with 'percent'\n l = re.sub(r\"(?<=\\d)%\", \" percent\", l)\n\n # Remove all characters that won't:\n # A: Be a part of the final sentence, or\n # B: Help inform sentence boundaries\n # We'll replace those charactrs with '<char>' to indicate that there\n # was an unsupported character in its place.\n #\n # Two characters we specifically NEED to remove are open and close\n # angle brackets ('<', '>'), because we use them in tags. We've\n # already made one tag with them -- '<url>', so we want to replace\n # unsupported characters EXCEPT where they are being used as part of\n # '<url>'.\n #\n # The regex pattern we're using is more complicated than I'd like,\n # but it seems to be the best solution (others require more helper\n # functions). URL tags are not fixed width -- it may also be of the\n # form '<url-$N>', where $N is an unbounded non-negative integer.\n # Because of this, we can't use a lookbehind assertion to only match\n # the text in-between, so we include url tags at the start of our\n # match. The replacement of characters then operates on the second\n # match group, while we preserve the first. The rest of the regex\n # should be *fairly* self-explanatory. 
\n keep_in = r\"a-zA-Z0-9.\\'\\\"\\-,;:?!()\\[\\]*'\\\" \"\n\n def repl_chars(match):\n return match.group(1) + re.sub(\"[^{}]+\".format(keep_in), \"<char>\", match.group(2))\n #\n l = re.sub(r\"(^|<url(?:-\\d+)?>)(.*?)($|(?=<url(-\\d+)?>))\", repl_chars, l)\n\n # Remove some speaker annotations. There's a few different ways this\n # is done that we need to take care of here (others will be\n # inadvertently taken care of later)\n #\n # What are speaker annotations? It's a term I made up (although that\n # may be what it's actually called). It refers to the times in a\n # subtitle in which a change in who's speaking labels them by name.\n # There are a few different ways this is done. Here are some\n # examples, in the time blocks that they originally were in:\n \"\"\"\n 00:00:52.052 --> 00:00:58.391 align:start size:88% position:13%\n &gt;&gt; Stephen: GOOD TO SEE YOU\n TOO.\n *********************\n 00:00:14.530 --> 00:00:16.460\n (Garrett) There's no way that's correct!\n *********************\n 00:00:00.100 --> 00:00:02.040\n STEVE (VO): Obviously, I love James.\n \n <...>\n\n 00:02:10.020 --> 00:02:11.960\n DR. PLIER: For it was camouflage.\n *********************\n 00:00:08.000 --> 00:00:10.000\n G: Hi guys, I'm here with my mom! T: Hey!\n *********************\n 00:00:02.250 --> 00:00:03.203\n - [Group Member] Hey, puppy!\n\n <...>\n\n 00:00:18.480 --> 00:00:20.477\n - Hi, BuzzFeed, we're BTS with--\n\n 00:00:20.650 --> 00:00:22.490\n - Hi, BTS!\n - PPS, the puppies.\n \"\"\"\n # Videos (in order): 2GrKY7Qqal8, -RmUADCWI4A, 0I9vWjc9nao,\n # 0PW9MWDWLH8, 1PhPYr_9zRY\n #\n # Some of these (like the second-to-last exmaple) cannot be done\n # yet, as they require other knowledge about sentence boundaries to\n # remove.\n #\n # For those that don't, however, we CAN get rid of them. 
We'll\n # ingore speakers switching mid-line for now, and just focus on\n # when it starts the line\n\n parenthetical = r\"(\\([^\\)]*\\)|\\[[^\\]]*\\])\"\n\n name = r\"[A-Z]([-a-z]+((\\. ?| )[A-Z][-a-z]*)*|[-A-Z]+((\\. ?| )[-a-z]+)*)?\"\n\n pattern = r\"^( ?(<char>|-))? ?{}( {})?: \".format(name, parenthetical)\n # \\-------------/ / \\----/\n # spacing | \\--parentheticals\n # name\n l = re.sub(pattern, \"<speaker>\", l)\n\n # collapse tags\n l = re.sub(r\"(<[^>]*>) (?=<[^>]*>)\", r\"\\1\", l)\n\n return l\n\ndef to_sentences(time_groups):\n # Picks out sentences from the lines\n \n lines = [[format_line(l) for l in group.lines] for group in time_groups]\n\n # `format_line` contains plenty of information as to what formatting\n # has been done to the lines at this point. Notably, a couple\n # characters and sequences have been replaced with '<...>'. The\n # first part of this establishes the rest of that\n #\n # We'll apply the same process for every group\n\n all_sentences = []\n\n for g in lines:\n # add the lines back together so that we can deal with one\n # coherent string\n s = '\\n'.join(g)\n\n # Remove various parenthetical statements: '(foo)', '[foo]', and\n # *foo* (or **foo**). These typically are used to convey\n # information other than what's being said - which is what we\n # care about.\n #\n # Additionally (as mentioned above, in `format_line()`)\n # parenthesis can also denote a new speaker, so we'll note that\n # as well\n s = re.sub(r\"\\([^\\)]*?\\)\", \"<paren>\", s)\n s = re.sub(r\"\\[[^\\]]*?\\]\", \"<bracket>\", s)\n s = re.sub(r\"(\\*+)[^*]*?\\1\", \"<asterisk>\", s)\n\n # Instead of removing the inside of quotes, we'll just mark\n # them. 
Double quotes are easy, because they don't get used in\n # any other circumstance than as quotation, but single quotes\n # use the same character as apostrophes, so it needs to be\n # marginally more complicated.\n s = re.sub(r\"\\\"([^\\\"]*?)\\\"\", r\"<dquote>\\1</dquote>\", s)\n #\n single_pattern = r\"(?<=\\s)__'([^']*?)'__(?![A-Za-z])\"\n s = re.sub(single_pattern, r\"<squote>\\1</squote>\", s)\n\n # ************************************************************\n # The next thing that we need to do is heavily inspired from a\n # certain stackoverflow answer about how to split a text into\n # sentences. We're essentially using it as a way to keep track\n # of all of the special cases that we get in the English\n # language.\n #\n # Answer: https://stackoverflow.com/a/31505798\n #\n # we'll be using '<prd>' to mark periods while we temporarily\n # take them out\n\n # mark certain words that are able to have periods after them in\n # normal sentences.\n #\n # `amb` for ambiguous\n amb = ['Mr', 'Ms', 'Mrs', 'St', 'Dr', 'Prof', 'Capt', 'Lt',\n 'Mt', 'Inc', 'Ltd', 'Co', 'Jr', 'Sr', 'Gov', 'Esq',\n 'Hon', 'Rev']\n # we include '>' before in case of some other tag before it\n pattern = r\"(?<=[\\s^>])({})\\.\".format(\"|\".join(amb))\n s = re.sub(pattern, r\"\\1<prd><maybe>\", s, flags=re.IGNORECASE)\n\n # PhD is a special case:\n s = re.sub(r\"(?<=[\\s^])Ph\\.D\\.\", r\"Ph<prd>D<prd><maybe>\", s)\n\n # Another special case is 'Vs.' or 'v.' 
in both lower and upper\n # case, which we know should not end a sentence\n #\n # This often occurs when talking about competitions or court\n # cases.\n s = re.sub(r\"(?<=[\\s^])(vs?)\\.\", r\"\\1<prd>\", s, flags=re.IGNORECASE)\n\n replace_prd = lambda match: match.group(0).replace('.', '<prd>')\n\n # Deal with acronyms\n replace_prd_acronym = lambda match: replace_prd(match) + '<maybe>'\n #\n s = re.sub(r\"([A-Z]\\.){2,}\", replace_prd_acronym, s)\n \n # Handle ellipsis\n s = re.sub(r\"\\.{3,}\", r\"<dots>\", s)\n s = re.sub(r\"(?<=[\\s^])(?=<dots>)\", \"<maybe>\", s)\n s = re.sub(r\"(?<=<dots>)(?=[\\s<$])\", \"<maybe>\", s)\n \n # Handle numbers\n s = re.sub(r\"\\d+\\.\\d+\", replace_prd, s)\n # Handling commas covers both European '5,6' (= 5.6) and\n # '2,019'\n replace_comma = lambda match: match.group(0).replace(',', '<comma>')\n #\n s = re.sub(r\"(?<=[\\s^])\\d+(, ?\\d+)+\", replace_comma, s)\n\n # Handle dashes (we've converted all types of dashes to '-').\n # Dashes used as punctuation will have a space bfore or after\n s = re.sub(r\"(\\s)-\", r\"\\1<maybe><dash>\", s)\n s = re.sub(r\"-(\\s)\", r\"<dash><maybe>\\1\", s)\n\n\n # LAST STEP:\n #\n # Handle punctuation and use periods to mark sentence boundaries\n s = re.sub(r\"([.!?])(</(?:s|d)quote>)\", r\"\\1<stop>\\2<maybe>\", s)\n s = re.sub(r\"([.!?])(?=[\\s<$])\", r\"\\1<stop>\", s)\n # NOTE : We're discarding everything in the quotes because we're\n # not sure that it's a complete sentence\n s = re.sub(r\"(?<![,.!?])(</(?:s|d)quote>)\", r\"<discard>\\1<maybe>\", s)\n\n # Another bit about quotes:\n # If the part of the sentence before the start of the quote\n # doesn't properly end, then we need to mark it for being\n # discarded.\n #\n # We're defining a lack of proper ending as not having\n # punctuation that would indicate such an ending.\n s = re.sub(r\"(?<!<stop>)(\\s+<(?:s|d)quote>)\", r\"<discard>\\1\", s)\n\n # Mark cases of URLs and unknown characters at the end of the\n # line with 
'<maybe>', because they might be used to terminate\n # the sentence\n s = re.sub(r\"(<char>|<url(?:-\\d+)>)(?=\\n)\", r\"\\1<maybe>\", s) \n\n # Commas imply that there's more to the sentence, so we'll use\n # '<continue>' to indicate that.\n s = re.sub(r\",\", r\",<continue>\", s)\n\n # make sure we don't get repeats of '<maybe>'\n s = re.sub(r\"(<maybe>\\s?){2,}\", '<maybe>', s)\n\n # ************************************************************\n # USE THE MARKERS TO SPLIT INTO SENTENCES\n # ************************************************************\n s = s.replace('<stop>', '<maybe><new>')\n s = re.sub(r\"(<maybe>\\s?){2,}\", '<maybe>', s)\n temp = [m.strip() for m in s.split('<maybe>')]\n\n s = \"\"\n last_fragment = None\n def shift_new_sentence(sentence):\n nonlocal s, last_fragment\n\n if last_fragment is not None:\n s += last_fragment + '<stop>'\n last_fragment = sentence\n return\n\n # m = current segment\n # p = previous\n for m, p in zip(temp, [\"\"]+temp):\n # For each sentence block indicated by '<maybe>', we'll\n # attempt to determine whether or not it is a new sentence\n # or a part of the previous.\n #\n # This part is complicated.\n #\n # For reference, here's a list of all of the tags are\n # currently in use\n # * url | url-$N\n # * char\n # * continue\n # * speaker\n # -- only at the beginning of a line\n # * discard\n # * paren | bracket | asterisk\n # * dquote | /dquote\n # -- open and close like html tags \n # * squote | /squote\n # -- open and close like html tags\n # * maybe\n # * new\n # * comma\n # * prd\n # * dash\n # * dots\n # Note that the following tags have already been replaced:\n # * stop\n #\n # Rules that we're going to apply, in order of preference:\n # 1. <new> - if present - will be at the start of the string\n # and will indicate the start of a new sentence\n # 2. <continue> at a sentence boundary should guarantee that\n # the current sentence fragment will be added to the last\n # 3. 
<speaker> indicates the start of a new sentence\n # 4. If (1) and (2) conflicts with (3), the sentence will be\n # joined with the last fragment and marked for being\n # discarded.\n # 5. Sentences with <char> or <url> between words should be\n # discarded\n # 6. However: <url> or <char>, if at the start or the end,\n # will indicate a sentence boundary. \n # 7. <paren>|<bracket>|<asterisk> are ignored if in the\n # middle or end and used as a sentence boundary if at\n # the start.\n # 8. <discard> will be done after the sentence has been\n # completed and indicates that it should be thrown out\n # 9. All other escaped tags are ignored because they have\n # already been taken into consideration.\n\n # Rule 1\n isNewSentence = m.startswith('<new>')\n\n # Rule 2\n if not isNewSentence:\n # Check for '<speaker>' in the starting tags\n if re.match(r\"^(<[^>]*>\\s?)*<speaker>\", m) is not None:\n isNewSentence = True\n\n # Rule 3\n continued = re.match(r\"<continue>(<[^>]*>\\s?)*$\", p) is not None\n\n # Rule 4\n bypass_add_later = False\n if continued and isNewSentence:\n m = '<discard>' + m\n isNewSentence = False\n bypass_add_later = True\n\n # Rule 5 - we'll attempt to match the entire string\n tag = r\"(?:<[^>]*>\\s?)\"\n other_tag = r\"(?:<(?!url(?:-\\d+)?|char)[^>]+>\\s?)\"\n word = r\"(?:[\\w,.;:!?]+\\s?)\"\n url_or_char = r\"(?:<url(?:-\\d+)?>|<char>)\"\n \n \"\"\"\n edge = r\"{}*{}\\s?(?:{}|{}\\s?)*\"\n start = edge.format(tag, word, word, other_tag)\n end = edge.format(tag, word, word, tag)\n pattern = r\"^({}{})(\\s?{})$\".format(edge, url_or_char, edge)\n \n m = re.sub(pattern, r\"\\1<discard>\\2\", m)\n \"\"\"\n\n start = r\"(?=({}*))\\2{}\\s?(?=((?:{}|{})*))\\3\".format(tag, word, word, other_tag)\n end = r\"(?=({}*))\\5{}\\s?(?=((?:{}|{})*))\\6\".format(tag, word, word, tag)\n\n pattern = r\"^({}{})(\\s?{})$\".format(start, url_or_char, end)\n\n m = re.sub(pattern, r\"\\1<discard>\\4\", m)\n\n # follow through with rules 1 and 2\n if isNewSentence:\n 
shift_new_sentence(m)\n continue\n\n # Rule 6 (ending)\n pattern = r\"^(<url(-\\d+)?>|<char>)\\s?(<[^>]*>\\s?)*$\"\n if re.match(pattern, p) is not None:\n shift_new_sentence(m)\n continue\n\n # Rule 6 (starting) and 7\n pattern = r\"^(<[^>]*>\\s?)*\" +\\\n r\"<(url(-\\d+)?|char|paren|bracket|asterisk)>\"\n if re.match(pattern, m) is not None:\n shift_new_sentence(m)\n continue\n\n # Now, it's just up to whether or not the fragment is the\n # start of a new sentence\n #\n # We'll only indicate that it's the start of a new sentence\n # if these conditions are met:\n # (1) the fragment starts with a word that starts with an\n # uppercase letter\n # (2) the word appears in the lexicon as lowercase but not\n # uppercase\n #\n # Regex to find the first word, so long as it starts with a\n # capital letter. The word will be the first match group.\n pattern = r\"^(?:<[^>]*>\\s?)*\" +\\\n r\"([A-Z][a-zA-Z]*)\"\n match = re.match(pattern, m)\n if match is not None and not bypass_add_later:\n word = match.group(1)\n\n lower_in = word.lower() in lexicon\n upper_in = (word in lexicon or\n (word[0] + word[1].lower()) in lexicon if len(word) > 1 else False)\n\n if lower_in and not upper_in:\n isNewSentence = True\n\n # if (word.lower() in lexicon and\n # not (word in lexicon or\n # (word[0] + word[1].lower()) in lexicon\n # )\n # ):\n # isNewSentence = True\n\n if isNewSentence:\n shift_new_sentence(m)\n else:\n if last_fragment is None:\n last_fragment = m\n else:\n last_fragment += \" \" + m\n\n if last_fragment is not None:\n s += last_fragment\n\n # Now that we've done essentially everything we needed to, we'll\n # start putting things back\n #\n # Every single tag is handled here.\n s = s.replace('<comma>', ',').replace('<prd>', '.')\n s = s.replace('<dash>', '-').replace('<dots>', '...')\n \n # We actually want to remove quotes, because we don't support\n # typing quotes\n s = re.sub(r\"</?squote>\", \"\", s)\n s = re.sub(r\"</?dquote>\", '', s)\n\n s = s.replace('<paren>', 
\"\").replace('<bracket>', \"\")\n s = s.replace('<asterisk>', \"\")\n\n # Remove helper control flow tags\n s = s.replace('<new>', \"\").replace('<continue>', \"\")\n s = s.replace('<speaker>', \"\")\n\n # For this case, we're removing URLs. Alternatively, we could\n # choose to save them\n s = re.sub(r\"<url(-\\d+)?>\", \"\", s)\n\n # We're also going to remove unrecognized characters\n s = s.replace('<char>', \"\")\n\n sentences = s.split('<stop>')\n\n # standardize whitespace\n sentences = [re.sub(r\"\\s+\", r\" \", s.strip()) for s in sentences]\n\n # discard all sentences marked for being discarded\n sentences = list(filter(lambda s: '<discard>' not in s, sentences))\n\n all_sentences.extend(sentences)\n\n return all_sentences\n\ndef filter_sentences(sentences):\n def is_good(s):\n return re.match(r\"\\w\", s) is not None\n\n return list(filter(is_good, sentences))\n\ndef ensure_lowercase(sentence):\n # If the given sentence is all uppercase, it converts it to\n # normal case, and if it is lowercase, it is left as is. There are\n # limitations to this, but this is a best attempt. 
A possible change\n # could be to put sentences that are only uppercase into a different\n # pool for later, as the casing may not be accurate.\n #\n # This will also remove any excess whitespace or certain punctuation\n # from the beginning and end of the sentence\n\n sentence = re.sub(r\"(^[ \\-]+|[ \\-]+$)\", \"\", sentence)\n\n # We'll essentially find all distinct words and if there is at least\n # one lowercase word, we'll say that the sentence is already in\n # lowercase.\n #\n # Note: Because we substitute '%' for 'percent' (lowercase) in\n # `format_line()`, we can't count 'percent' as an example of a\n # lowercase word.\n words = re.findall(r\"[a-zA-Z]+\", sentence)\n #\n words = filter(lambda w: w != 'percent', words)\n #\n # Check that there's a lowercase letter in a word\n if any(re.match(r\"[a-z]\", w) for w in words):\n return sentence\n\n # Because we've now found that it's uppercase, we'll go through and\n # see if we can change each word.\n #\n # If the original uppercase word is in the lexicon, we'll leave it\n # because it may be an acronym. Additionally, unrecognized words\n # will also be left as uppercase.\n #\n # We need to allow \"'\" because of contractions. Our minimum length\n # is to filter out acronyms. There is ONE single-letter word in\n # English that should remain lowercase: \"a\", so we'll deal with that\n # immediately after this substitution\n pattern = r\"[A-Z']{2,}\"\n #\n def sub_word(w):\n w = w.group(0)\n\n # There's a special case for contractions like \"I'm\" and \"I've\":\n # Because they're mixed case, we can't just write `w.lower()`,\n # so we'll specifically filter out contractions that start with\n # `I'`\n if w.startswith(\"I'\"):\n lower = w[:2] + w[2:].lower()\n if w in lexicon or lower not in lexicon:\n return w\n\n return lower\n\n if w in lexicon or w.lower() not in lexicon:\n return w\n\n return w.lower()\n #\n sentence = re.sub(pattern, sub_word, sentence)\n # Now we'll deal with \"a\". 
We just need to make sure that it's not\n # part of an acronym\n sentence = re.sub(r\"((?<=^)|(?<= ))A(?=[ ,;:?!]|\\.$)\", \"a\", sentence)\n\n # Make sure that the first word is capitalized\n sentence = re.sub(r\"^[a-z]\", lambda m: m[0].upper(), sentence)\n\n return sentence\n\ndef do_video(videoID):\n groups = read_groups(videoID)\n if groups is None:\n return\n\n # for g in groups:\n # print(g.lines)\n\n groups = preprocess_groups(groups)\n\n sentences = to_sentences(groups)\n sentences = filter_sentences(sentences)\n sentences = [ensure_lowercase(s) for s in sentences]\n write_sentences(videoID, sentences)\n\n# These lines are for testing purposes -- in order to test on a single\n# video. Uncomment `do_video` and comment out everything following\n# 'Main:' to test.\n# do_video('1ZAPwfrtAFY')\n# do_video('UB1cGaIW81I')\n# do_video('069D0NmW39o')\n\n# Main:\nvideo_ids = vlist.video_ids()\n\n#\nprogress_width = 30\n#\nkeep = list(filter(lambda v: v not in garbage_subs, video_ids))\nfor i, videoID in enumerate(keep):\n # Make a progress bar\n # should look like this:\n # [====================> ] 67%\n p = int(i / len(keep) * progress_width)\n percent = int(100 * i / len(keep))\n sys.stdout.write('\\r[' + '='*p + '>' + ' '*(progress_width-p) + '] {}%'.format(percent))\n sys.stdout.flush()\n \n # print(videoID)\n do_video(videoID)\n\n# print the progress bar as complete\nprint(\"\\r[{}>] 100%\".format('='*progress_width))" }, { "alpha_fraction": 0.7238346338272095, "alphanum_fraction": 0.7308707237243652, "avg_line_length": 26.731706619262695, "blob_id": "eb8d51d855211659357f4465f2f2ae3d4ad49270", "content_id": "910986b21dd909ec11a39d5d36beadb545ed38c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "permissive", "max_line_length": 99, "num_lines": 41, "path": "/Convert (deprecated)/delayedinterrupt.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This 
file provides a class that allows the delaying of keyboard interrupts for blocks of code\n# that should not be interrupted. This is used in `convert_script.py` to ensure that we get a clean\n# break when we're processing videos -- sometimes this means a long wait time, but that's okay.\n#\n# This class is adapted from the following stackoverflow answer:\n# https://stackoverflow.com/a/21919644\n\nimport signal\n\nclass DelayedInterrupt():\n\tdef __enter__(self):\n\t\tself.enable()\n\n\t\treturn self\n\n\tdef enable(self):\n\t\tif hasattr(self, 'is_enabled') and self.is_enabled:\n\t\t\treturn\n\n\t\tself.signal_received = False\n\t\t# signal.signal returns the previous handler\n\t\tself.old_handler = signal.signal(signal.SIGINT, self.handler)\n\n\t\tself.is_enabled = True\n\n\tdef handler(self, sig, frame):\n\t\tself.signal_received = (sig, frame)\n\t\tprint('SIGINT received. Delaying until finished.')\n\n\tdef disable(self):\n\t\tif not self.is_enabled:\n\t\t\treturn\n\n\t\tsignal.signal(signal.SIGINT, self.old_handler)\n\t\tif self.signal_received:\n\t\t\tself.old_handler(*self.signal_received)\n\n\t\tself.is_enabled = False\n\n\tdef __exit__(self, type, value, traceback):\n\t\tself.disable()\n" }, { "alpha_fraction": 0.7066858410835266, "alphanum_fraction": 0.7160316109657288, "avg_line_length": 33.12883377075195, "blob_id": "1286a9ab478cc49bfdeb8aee9d45c29e8917977a", "content_id": "afbfe2fb781d06a5f2e107a49ecb77bf950ce1a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5564, "license_type": "permissive", "max_line_length": 102, "num_lines": 163, "path": "/Convert (deprecated)/convert_helper.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This holds a large number of specific functions used in 'convert_script.py'.\n\nimport os\nimport sys\nimport json\n\nimport cv2\nimport dlib\n\n# import `consts.py` from the parent directory\nfrom os.path import abspath, join, 
dirname\nsys.path.append(abspath(join(dirname(abspath(__file__)), '..')))\nimport consts\n\n# indexes in dlib's 68-point landmark shape predictor\n# taken from:\n# https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/,\n# https://www.pyimagesearch.com/wp-content/uploads/2017/04/facial_landmarks_68markup.jpg\nl_eye_l_edge = 36\nl_eye_r_edge = 39\nr_eye_l_edge = 42\nr_eye_r_edge = 45\n\n# pre-load the face-detection from dlib so they we have them while we do cropping\ndetector = dlib.cnn_face_detection_model_v1(consts.face_detector_path)\npredictor = dlib.shape_predictor(consts.facial_landmark_detector_path)\n\nprevious_write = \"\"\ndef write_flush(s):\n\tglobal previous_write\n\n\tsys.stdout.write(s)\n\tsys.stdout.flush()\n\tprevious_write = s\n\ndef query_yes_no(init_msg, retry_msg, delayed_interrupt=None):\n\tif delayed_interrupt is not None:\n\t\tdelayed_interrupt.disable()\n\twrite_flush(init_msg)\n\n\twhile True:\n\t\tresponse = sys.stdin.readline().strip()\n\t\t\n\t\tif response == 'y' or response == 'n':\t\n\t\t\tif delayed_interrupt is not None:\n\t\t\t\tdelayed_interrupt.enable()\n\t\t\treturn response\n\n\t\twrite_flush(retry_msg)\n\n\ndef get_namespace(delayed_interrupt):\n\tprevious_line = previous_write\n\n\tnamespace = {}\n\tif os.path.exists(consts.namespace_path):\n\t\twith open(consts.namespace_path, 'r') as f:\n\t\t\tnamespace = json.load(f)\n\telse:\n\t\tresponse = query_yes_no('\\rNo namespace file found. Continue? (y/n): ',\n\t\t\t\t\t\t\t\t\"Please enter 'y' or 'n'. Continue? 
(y/n): \",\n\t\t\t\t\t\t\t\tdelayed_interrupt)\n\t\tif response == 'n':\n\t\t\tprint(\"Found no file at: '{}'\".format(consts.namespace_path))\n\t\t\treturn None\n\n\t\twrite_flush(previous_line)\n\t\n\treturn namespace\n\ndef update_namespace(new_namespace):\n\twith open(consts.namespace_path, 'w+') as f:\n\t\tjson.dump(new_namespace, f)\n\n# This function is separated so that it can be changed later\ndef parse_import_filename(filename):\n\tid_and_ext = filename.split('.')\n\t# returns id, ext\n\treturn id_and_ext[0], id_and_ext[1]\n\n# renames the file with the given name, and returns the new name\n#\n# This function assumes that our working directory is the top-level directory of the project\ndef rename_with_namespace(delayed_interrupt, filename):\n\tsentence_id, ext = parse_import_filename(filename)\n\n\tnamespace = get_namespace(delayed_interrupt)\n\tif namespace is None:\n\t\treturn None\n\n\tn = namespace[sentence_id] if sentence_id in namespace else 0\n\tnamespace[sentence_id] = n + 1\n\n\tnew_filename = \"{}-{}.{}\".format(sentence_id, n, ext)\n\tnew_filepath = os.path.join(consts.import_dir, new_filename)\n\told_filepath = os.path.join(consts.import_dir, filename)\n\tos.rename(old_filepath, new_filepath)\n\n\t# We wait until the end to update `namespace` because we don't want to write to the file with\n\t# our changes to `namespace` until we know that we need to (i.e. until we've renamed the file)\n\t#\n\t# In reality, we wait on every part of this to finish, because of how it's called in\n\t# `convert_script.py`, but -- in case that doesn't work (or the process is killed) -- it's good\n\t# to be safe about it.\n\t# The specific concern here is that `os.rename` fails\n\tupdate_namespace(namespace)\n\n\treturn new_filename\n\n# moves the given directory entry (e.g. 
file, subdirectory) in `init_dir` with the given `filename`\n# into `targ_dir` \ndef move(filename, init_dir, targ_dir):\n\tos.rename(os.path.join(init_dir, filename), os.path.join(targ_dir, filename))\n\n# makes a subdirectory with `subdir_name` within `containing_dir` and outputs a sequence of images to \ndef convert_to_imgs(filename, subdir_name, containing_dir):\n\tfilepath = os.path.join(containing_dir, filename)\n\t\n\tsubdir_path = os.path.join(containing_dir, subdir_name)\n\tos.makedirs(subdir_path)\n\n\tfile_names = os.path.join(subdir_path, consts.img_file_format)\n\n\tos.system(\"ffmpeg -loglevel panic -i {} -r {} {}\".format(filepath, consts.fps, file_names))\n\ndef crop_image(i, img_path, face_box):\n\t# unfortunately, it appears we can't simply use the same image both times; we have to\n\t# independently load it twice. In its C++ documentation, dlib lists `cv_image()` as a method\n\t# for generating a dlib image from an opencv image, but that feature does not seem to be\n\t# available for Python.\n\n\tshape = predictor(dlib.load_rgb_image(img_path), face_box)\n\n\timg = cv2.imread(img_path)\n\n\toutput_dim_ratio = float(consts.output_height) / float(consts.output_width)\n\n\tdef crop_helper(write_file, l_edge_index, r_edge_index):\n\t\t# get the bounds of the cropped region on the x-axis\n\t\tx1 = shape.part(l_edge_index).x\n\t\tx2 = shape.part(r_edge_index).x\n\n\t\t# from our given x-axis bounds, determine our y coordinates. 
We'll center the cropped\n\t\t# region at the average of the corners of the eye (given by the shape indexes)\n\t\ty_center = (shape.part(l_edge_index).y + shape.part(r_edge_index).y) / 2\n\t\theight = output_dim_ratio * (x2 - x1)\n\t\ty1 = int(y_center - height/2)\n\t\ty2 = int(y_center + height/2)\n\n\t\t# get the new, cropped image and scale it\n\t\tnew_img = img[y1:y2, x1:x2]\n\t\t\n\t\tscale_factor = float(consts.output_height) / float(int(height))\n\t\tnew_width = (x2 - x1) * scale_factor\n\t\tnew_height = (y2 - y1) * scale_factor\n\t\t\n\t\tnew_img = cv2.resize(new_img, (int(new_width), int(new_height)))\n\n\t\t# save the image\n\t\tcv2.imwrite(write_file, new_img)\n\n\tcrop_helper(consts.left_eye_format.format(i), l_eye_l_edge, l_eye_r_edge)\n\tcrop_helper(consts.right_eye_format.format(i), r_eye_l_edge, r_eye_r_edge)\n\n" }, { "alpha_fraction": 0.6369478106498718, "alphanum_fraction": 0.6369478106498718, "avg_line_length": 26.065217971801758, "blob_id": "830befcde45209b0da74f084bdc13b2ef975bd12", "content_id": "5b9a0504a4cb2ee46316be42c178444dea922ae0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1245, "license_type": "permissive", "max_line_length": 106, "num_lines": 46, "path": "/kaolin-eyeswipe-recorder/EyeSwipeRecorder/IntroViewController.swift", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "//\n// IntroViewController.swift\n// EyeSwipeRecorder\n//\n//\n\nimport UIKit\n\nclass IntroViewController: UIViewController {\n \n @IBOutlet weak var textField: UITextField!\n var userEmail : String?\n override func viewDidLoad() {\n super.viewDidLoad()\n \n } \n @IBAction func emailEnter(_ sender: UITextField) {\n userEmail = sender.text!\n }\n @IBAction func buttonPressed(_ sender: Any) {\n if textField.isFirstResponder {\n userEmail = textField.text!\n textField.resignFirstResponder()\n }\n }\n override func prepare(for segue: UIStoryboardSegue, sender: Any?) 
{\n if let vc = segue.destination as? ViewController {\n vc.userEmail = userEmail\n }\n }\n override func shouldPerformSegue(withIdentifier identifier: String, sender: Any?) -> Bool {\n return userEmail != nil\n }\n \n\n /*\n // MARK: - Navigation\n\n // In a storyboard-based application, you will often want to do a little preparation before navigation\n override func prepare(for segue: UIStoryboardSegue, sender: Any?) {\n // Get the new view controller using segue.destination.\n // Pass the selected object to the new view controller.\n }\n */\n\n}\n" }, { "alpha_fraction": 0.7540650367736816, "alphanum_fraction": 0.7540650367736816, "avg_line_length": 40.08333206176758, "blob_id": "4780c9f128d3dc51f5087e48428fbb2d31a98cdb", "content_id": "578bc8423250660ab78edc8aca30020cf0ad98ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 492, "license_type": "permissive", "max_line_length": 97, "num_lines": 12, "path": "/Model/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Model\n\nThis directory contains the files pertinent to constructing our model. 'model.py' serves as a set\nof building blocks for the type of model that we will be using.\n\n### Files\n\n* [model.py](model.py): The helper classes for creating our end-to-end model.\n* [test_model.py](test_model.py): A script that simply checks that a sample model can run without\n\tany faults.\n* [notes.md](notes.md): A collection of notes pertaining to our model and possible improvements\n\tthat can be made to it." 
}, { "alpha_fraction": 0.7226107120513916, "alphanum_fraction": 0.7319347262382507, "avg_line_length": 27.600000381469727, "blob_id": "ff0f4398ad1208096bf89b5b158e40605b7e0f50", "content_id": "109e7cab5fd4c53d7d07ac23d16d3835cc4d5c03", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "permissive", "max_line_length": 95, "num_lines": 30, "path": "/consts.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This file serves as a place to put all of the various constants used by parts of this system.\n\nimport os\ncontaining_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef wrap(path):\n\treturn os.path.join(containing_dir, path)\n\ndata_dir = wrap('Data')\nimport_dir = wrap('Import')\nsource_data_dir = wrap('SourceData')\ncheckpoint_dir = wrap('Checkpoints')\ncheckpoint_prefix = wrap('Checkpoints/ckpt')\n\nnum_total_file = 'total'\nleft_eye_format = \"l_{}.jpg\"\nright_eye_format = \"r_{}.jpg\"\n\noutput_height = 20\noutput_width = 30\nfps = 30\nnamespace_path = wrap('Convert/namespace.json')\nface_detector_path = wrap('Convert/mmod_human_face_detector.dat')\nfacial_landmark_detector_path = wrap('Convert/shape_predictor_68_face_landmarks.dat')\nimg_file_format = \"%d.jpg\"\n\nfrom Sentences import sets\n\n# the final model output size\nfinal_output_size = len(sets.output_set)\n" }, { "alpha_fraction": 0.7534246444702148, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 31.5, "blob_id": "3e9be7c6e1e3bc96797a8138189781a23a265919", "content_id": "7e71e1ca929349dcc25706ba5f8d50b8bc6c630c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "permissive", "max_line_length": 79, "num_lines": 18, "path": "/Sentences/sentence_consts.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# These are all of the local constants used for lexicon and sentence 
generation\n\nimport os\ncontaining_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef wrap(path):\n\treturn os.path.join(containing_dir, path)\n\nlexicon_dir = wrap('Lexicon')\ngen_dir = wrap('Generation')\n\nlexicon_file = os.path.join(lexicon_dir, 'lexicon.txt')\n\nsentences_subset = wrap('sentences_subset.txt')\nannotated_sentences_file = wrap('sentences_subset_annotated.txt')\nsentence_dict_file = wrap('sentence_dict.json')\ncleaned_sentences = wrap('sentences.txt')\nrandom_sentence_list = wrap('random_sentences.txt')" }, { "alpha_fraction": 0.7564870119094849, "alphanum_fraction": 0.7564870119094849, "avg_line_length": 25.421052932739258, "blob_id": "12a1ead23be7b5bd05785f864707557a61665f81", "content_id": "1536a1d2703de585e04f92144e0ae5a2e8686b64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 501, "license_type": "permissive", "max_line_length": 97, "num_lines": 19, "path": "/reset_deprecated.sh", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# This simply removes all items in 'Data' and 'SourceData' and copies the contents of 'temp_copy'\n# to 'Import'. Used for testing 'Convert/convert_script.py'\n#\n# This script should only be run from within its containing directory.\n\n# preserve READMEs\nmv Data/README.md Data_README.md\nmv SourceData/README.md SourceData_README.md\n\nrm -rf Data/*\nrm -f SourceData/*\n\n# put READMEs back\nmv Data_README.md Data/README.md\nmv SourceData_README.md SourceData/README.md\n\nrsync temp_copy/* Import/." 
}, { "alpha_fraction": 0.7887324094772339, "alphanum_fraction": 0.7887324094772339, "avg_line_length": 48.79999923706055, "blob_id": "006c54f048baca5368f96c73435add208a8a79af", "content_id": "dd669f5b0fd4c0978be06034f88bce0eea97d8bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 497, "license_type": "permissive", "max_line_length": 99, "num_lines": 10, "path": "/Import (deprecated)/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Import\n\n**NOTE: DEPRECATED.** Below is the purpose it previously served.\n\nThis directory serves as the landing place for all new data. Each video should be formatted such\nthat its name (before the filetype extension) should simply be the numerical ID corresponding to\nthe sentence it represents. For some more information on usage, see ['/Convert'](../Convert)\n\nAlso of note: This directory will usually remain empty, as the proper procedure involves processing\ndata here once we've received it." 
}, { "alpha_fraction": 0.7402885556221008, "alphanum_fraction": 0.7502774596214294, "avg_line_length": 32.407405853271484, "blob_id": "586f4b5636fb222691c8eb0c8cb4e80456ff56e1", "content_id": "f3024060c1bed4dc73b4cb251b8aec0240ef3695", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "permissive", "max_line_length": 72, "num_lines": 27, "path": "/Sentences/Generation/gen_consts.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import os\ncontaining_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef wrap(path):\n\treturn os.path.abspath(os.path.join(containing_dir, path))\n\nvideos_dataset = wrap('USvideos.csv')\nvideo_id_list = wrap('videoids.json')\nsubtitles_dir = wrap('youtube-subtitles')\nall_sentences = wrap('sentences_all.txt')\next = 'en.vtt'\n\n# the maximum amount of gap between subtitle blocks that for which we'll\n# consider them as continuous, measured in seconds\n#\n# Note: This is used in two ways. 
(1) To determine overlapped regions,\n# and (2) to indicate the start of a new sentence\nsubtitle_max_gap_time = 0.3\n\n# the required portion of subitles that need to have overlap in order to\n# consider it a part of the formatting for the video\nsubtitle_min_fraction = 0.5\n\n# These two constants dictate the minimum and maximum lengths of\n# sentences we'll allow with our cleaning script\nlower_bound = 3\nupper_bound = 15" }, { "alpha_fraction": 0.6305303573608398, "alphanum_fraction": 0.6394613981246948, "avg_line_length": 39.43611145019531, "blob_id": "f3202daac1009f40c765665293249c55ead88602", "content_id": "e51bd635f26c4bc2236eeb37af097727bfb9d196", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14556, "license_type": "permissive", "max_line_length": 125, "num_lines": 360, "path": "/Model/model.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This file establishes the component models that create the equivalent of Google's LAS \"Listener\", but for\n# streams of video inputs, using modifications to attention found in the paper:\n# \"END-TO-END ATTENTION-BASED LARGE VOCABULARY SPEECH RECOGNITION\" (2015)\n#\n# A couple notes:\n# * Because we want to allow variable-length sequences while knowing the shape of the individual\n# images, we're using `item_shape` to indicate the shape of the items in any sequence, be it the\n# input sequence of video frames or the condensed sequence passed between DSBLSTM layers\n#\n# * On the general format of the total model: We have frame-by-frame input provided to the Watcher,\n# which is composed of two parts: the convolutions of individual frames and the subsequent layers\n# of stacked bidirectional RNNs (either LSTMs or GRUs). 
This is created by WatcherBuilder and\n# outputs the \"hidden\" layer, the shorter sequence that is passed to the decoder.\n# * The decoder (analagous to LAS's AttendAndSpell) uses the attention proposed by the paper listed\n# above (ETEABLVSR) combined with an RNN decoder. Of note is that the attention mechanism is\n# tasked with finishing by itself (so it could theoretically run forever without other mechanisms\n# in place), so there are elements built in to prevent that from being an issue.\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport numpy as np\n\n__all__ = ['DeepStackedBiRNNBuilder', 'Speller']\n\n# for use in DeepStackedBiRNNBuilder\nclass ConcatPool(layers.Layer):\n def __init__(self, factor):\n super(ConcatPool, self).__init__()\n\n self.factor = factor\n\n def build(self, input_shape):\n return\n\n def call(self, inputs):\n # this is a special case that's only called during setup\n if inputs.shape[0] is None:\n return tf.concat([inputs, inputs], axis=2)\n\n num_items = inputs.shape[0] // self.factor * self.factor\n\n # new_shape = list(inputs.shape)\n # new_shape[0] //= self.factor\n # new_shape[-1] = -1\n\n t = tf.reshape(inputs[:num_items], [inputs.shape[0] // self.factor, 1, -1])\n return t\n\nclass DeepStackedBiRNNBuilder:\n def __init__(self, input_shape):\n self.inputs = [keras.Input(shape=input_shape), keras.Input(shape=input_shape)]\n\n # combine the two inputs\n flattened_left = layers.Flatten()(self.inputs[0])\n flattened_right = layers.Flatten()(self.inputs[1])\n combined = tf.concat([flattened_left, flattened_right], axis=1)\n # add a dimension to make it compatible\n ins = tf.expand_dims(combined, axis=1)\n\n self.fw = ins\n self.bw = tf.reverse(ins, [0])\n\n # last_birnn is (rnn_type, units, output_pooling, kwargs)\n self.last_birnn = None\n\n self.built = False\n\n\n def check_build_status(self):\n if self.built:\n raise Exception('Model has already been built')\n\n def add(self, 
units, rnn_type=layers.LSTM, output_pooling=1, **kwargs):\n self.check_build_status()\n\n kwargs[\"return_sequences\"] = True\n\n self.fw = rnn_type(units, **kwargs)(self.fw)\n self.bw = rnn_type(units, **kwargs)(self.bw)\n\n if output_pooling != 1:\n self.fw = ConcatPool(output_pooling)(self.fw)\n self.bw = ConcatPool(output_pooling)(self.bw)\n\n self.last_birnn = (rnn_type, units, output_pooling, kwargs)\n\n return self\n\n def repeat(self, n):\n self.check_build_status()\n\n if self.last_birnn is None:\n raise Exception('Nothing to repeat')\n\n (rnn_type, units, output_pooling, kwargs) = self.last_birnn\n\n for i in range(n):\n self.fw = rnn_type(units, **kwargs)(self.fw)\n self.bw = rnn_type(units, **kwargs)(self.bw)\n\n if output_pooling != 1:\n self.fw = ConcatPool(output_pooling)(self.fw)\n self.bw = ConcatPool(output_pooling)(self.bw)\n\n return self\n\n def build(self):\n self.check_build_status()\n\n # concatenate outputs to be given for each temporal slice so that we get both passes\n # (forward and backward) in the right order, then construct the model.\n\n self.bw = tf.reverse(self.bw, [0])\n\n outputs = tf.squeeze(tf.concat([self.fw, self.bw], axis=2), axis=1)\n\n return keras.Model(inputs=self.inputs, outputs=outputs)\n\n\n# This is our attention model, which is based on the attention model from:\n# \"End-to-End Attention-Based Large Vocabulary Speech Recognition\", 2016\n# link: https://arxiv.org/abs/1508.04395\n#\n# This type of attention could be referred to as: \"Windowed attention with convolutions\", but that\n# becomes far too lengthy for a class name.\n#\n# a note for names: in comments, 'hidden_size' refers to the size of the output of the encoder at\n# each time-step.\nclass Attention(tf.keras.Model):\n # w_l and w_r give the number of units to the left and right (respectively) of the previous\n # median to look\n def __init__(self, units, num_features, w_l, w_r, kernel_size=None, preference_left_pad=True):\n super(Attention, 
self).__init__()\n\n self.w_l = w_l\n self.w_r = w_r\n\n if kernel_size is None:\n kernel_size = self.window_size()\n\n self.kernel_size = kernel_size\n\n # add necessary padding in order to output convolutional features centered at each\n # attention weight\n l_pad = kernel_size // 2\n r_pad = kernel_size // 2\n\n if self.kernel_size % 2 == 0:\n # we need to take a unit out on one of the sides of the filter\n if preference_left_pad:\n r_pad -= 1\n else:\n l_pad -= 1\n\n self.padding_shape = tf.constant([[0, 0], [l_pad, r_pad], [0, 0]])\n\n # these are as they are described in the aforementioned paper. Instead of directly\n # representing them as a parameter matrix, we're describing them as single-layer\n # perceptrons, because they are functionally the same.\n self.U = layers.Dense(units)\n self.V = layers.Dense(units)\n self.W = layers.Dense(units)\n self.Q = layers.Conv1D(num_features, self.kernel_size, strides=1)\n\n # In the paper, this is simply a weight vector, but this difference should not matter.\n # The bias vector mentioned in the paper is not present here because the dense layers have\n # their own biases.\n self.w = layers.Dense(1)\n\n # if not None, then it has the shape of a 1D array\n self.previous_attention = None\n self.previous_window_bounds = None\n\n self.bounds_restriction = None\n\n # This should be called at the end of every sequence prediction, once all characters have been\n # generated.\n def reset(self):\n self.previous_attention = None\n self.previous_window_bounds = None\n\n # sets a restriction on where we can center the attention.\n def set_bounds_restriction(self, bounds_restriction):\n self.bounds_restriction = bounds_restriction\n\n def remove_bounds_restriction():\n self.bounds_restriction = None\n\n def window_size(self):\n return self.w_l + 1 + self.w_r\n\n # only for internal use.\n # takes window bounds (which may walk off the end of 'values') and returns a nicely padded\n # window.\n #\n # returns window with shape (1, 
window_size, hidden_size)\n def window_helper(values, bounds):\n # lb for lower bound, ub for upper bound\n (lb, ub) = bounds\n\n seq_len = values.shape[0]\n\n window = values[max(lb, 0):min(ub, seq_len), ...]\n if lb < 0:\n window = tf.pad(window, [[-1 * lb, 0], [0, 0]], \"CONSTANT\")\n if ub > seq_len-1:\n diff = ub - seq_len #-1\n window = tf.pad(window, [[0, diff], [0, 0]], \"CONSTANT\")\n\n return tf.expand_dims(window, 0)\n\n # returns window, with shape = (1, window_size, hidden_size)\n #\n # does not allow a window to be centered outside 'values'\n def get_window(self, values):\n # if this is the first round, just center the attention as close to the start as we can.\n if self.previous_attention is None:\n m = 0 if self.bounds_restriction is None else self.bounds_restriction(None)[0]\n\n # we include the +1 because we want to go 'w_r' units out from the center. This is just\n # like a normal range: [lower, upper)\n self.previous_window_bounds = (m - self.w_l, m + self.w_r +1)\n return Attention.window_helper(values, self.previous_window_bounds)\n\n\n # we'll use available_range to define the range that we're allowed to choose our median\n # within\n available_range = (self.bounds_restriction(self.previous_window_bounds)\n if self.bounds_restriction is not None\n else (0, self.window_size()))\n\n # take only the overlap of the two ranges. Even without bound restrictions, this is\n # necessary because the previous window may have gone outside the edge of 'values'\n #\n # lower >= 0, upper <= len(previous_attention)\n lower = max(available_range[0], self.previous_window_bounds[0]) - self.previous_window_bounds[0]\n upper = min(available_range[1], self.previous_window_bounds[1]) - self.previous_window_bounds[0]\n\n # we're squeezing here because the previous_attention has shape (1, window_size)\n a = tf.squeeze(self.previous_attention)[lower:upper].numpy()\n \n # get index of median. 
This seems to be the fastest supported way to do this, per this\n # post:\n # https://stackoverflow.com/questions/32923605/is-there-a-way-to-get-the-index-of-the-median-in-python-in-one-command\n m = np.argsort(a)[len(a) // 2]\n \n # now that we have our median in terms of its position in the previous window, we get its\n # absolute position in 'values'\n m += self.previous_window_bounds[0]\n\n # set new bounds. See comment above near the top of this function about the +1\n self.previous_window_bounds = (m - self.w_l, m + self.w_r +1)\n return Attention.window_helper(values, self.previous_window_bounds)\n\n # values has shape (hidden_sequence_length, hidden_size)\n # previous_rnn_state has shape (rnn_state_size)\n # returns the context vector and the bounds of the window\n #\n # Will return 'None' if the window is outside of the bounds of the hidden layer\n def call(self, values, previous_rnn_state):\n # window has shape (1, window_size, hidden_size)\n window = self.get_window(values)\n if window is None:\n return None\n\n # we need the shape to be (1, window_size, 1)\n if self.previous_attention is None:\n self.previous_attention = tf.zeros([1, self.window_size(), 1])\n\n # in the paper, this is notated as 'F'\n # shape = (1, window_size, kernel_size)\n features = self.Q(tf.pad(self.previous_attention, self.padding_shape))\n # shape = (1, window_size, units)\n _V = self.V(window)\n _U = self.U(features)\n\n # at first, _W has shape (1, units), so we repeat it to be (1, window_size, units)\n _W = self.W(previous_rnn_state)\n _W = tf.reshape(tf.tile(_W, [1, self.window_size()]), _V.shape)\n\n # notated as 'e_t'\n # shape = (1, window_size, 1)\n scores = self.w(tf.nn.tanh(_V + _W + _U))\n\n # notated as '\\alpha_t' and 'c_t', respectively\n # shape = (1, window_size, 1)\n attention_weights = tf.nn.softmax(scores)\n self.previous_attention = attention_weights\n\n # shape = (1, window_size, hidden_size)\n context = attention_weights * window\n\n # sum the various parts 
to produce a single, weighted vector -- this is an extension of\n # Bahdanau attention.\n # shape = (1, hidden_size) after reduction\n context_vector = tf.reduce_sum(context, axis=1)\n\n return context_vector\n\n\n# speller takes as input the hidden layer produced by Watcher and provides output probabilities of\n# each character\nclass Speller(tf.keras.Model):\n # bits of this are taken from the TensorFlow tutorial:\n # https://www.tensorflow.org/beta/tutorials/text/nmt_with_attention#write_the_encoder_and_decoder_model\n def __init__(self, attn_units, attn_conv_features, attn_window, rnn_units, output_size):\n super(Speller, self).__init__()\n\n self.attention = Attention(attn_units, attn_conv_features, attn_window[0], attn_window[1])\n self.rnn = layers.GRU(rnn_units, return_sequences=True, return_state=True)\n self.fc = layers.Dense(output_size)\n\n self.previous_rnn_state = None\n\n # values is the output from the Encoder\n # previous_output is a one-hot vector corresponding to the chosen output at the last time-step\n #\n # values shape = (1, hidden_sequence_length, hidden_size)\n # previous_output shape = (1, output_size)\n #\n # returns output: shape = (1, output_size); rnn_state: shape = (1, state_size)\n def call(self, values, previous_output):\n # This shouldn't typically be true, becase we'll feed it the start character\n if previous_output is None:\n previous_output = tf.zeros([1, self.fc.units])\n\n # we need to supply a state to the attention, because it doesn't have its own handling for\n # the 0th state.\n if self.previous_rnn_state is None:\n self.previous_rnn_state = tf.zeros([1, self.rnn.units])\n\n # shape = (1, hidden_size)\n context_vector = self.attention(values, self.previous_rnn_state)\n\n # rnn expects shape with ndims=3, so we need to add another dimension here\n # input shape = (1, 1, hidden_size + output_size)\n # rnn_output shape = (1, 1, rnn_output_size)\n # state shape = (1, state_size)\n rnn_output, state = 
self.rnn(tf.expand_dims(tf.concat([context_vector, previous_output], 1), axis=1))\n self.previous_rnn_state = state\n\n # shape = (1, rnn_output_size)\n rnn_output = tf.squeeze(rnn_output, axis=1)\n\n # shape = (1, output_size)\n output = self.fc(rnn_output)\n\n return output, state\n\n def reset(self):\n self.attention.reset()\n self.previous_rnn_state = None\n\n def set_bounds_restriction(self, bounds_restriction):\n self.attention.set_bounds_restriction(bounds_restriction)\n\n def remove_bounds_restriction(self):\n self.attention.remove_bounds_restriction()" }, { "alpha_fraction": 0.6336633563041687, "alphanum_fraction": 0.6450311541557312, "avg_line_length": 21.733333587646484, "blob_id": "2cde0c11989b706647c002e3108420d92ac81438", "content_id": "5edbed8a9507730ec87b6872f9cba876c67c4d76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2727, "license_type": "permissive", "max_line_length": 93, "num_lines": 120, "path": "/Server/src/io.go", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"io/fs\"\n\t\"bufio\"\n\t\"strings\"\n\t\"mime/multipart\"\n\t\"crypto/sha256\"\n\t\"crypto/rand\"\n\t\"encoding/binary\"\n\t\"encoding/hex\"\n\t\"os\"\n\t\"strconv\"\n)\n\ntype SentenceManager struct {\n\tsentences []string\n}\n\nfunc (sentManager *SentenceManager) loadSentences() {\n\tbytes, err := ioutil.ReadFile(\"../../Sentences/sentences.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsentManager.sentences = strings.Split(string(bytes), \"\\n\")\n}\n\nfunc (sentManager *SentenceManager) getSentence() string {\n\tif len(sentManager.sentences) == 0 {\n\t\tpanic(\"Attempted to get sentence before loading sentences.txt\")\n\t}\n\trandBytes := make([]byte, 8)\n\t_, err := rand.Read(randBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trandInt := int(binary.BigEndian.Uint64(randBytes))\n\tif randInt < 0 {\n\t\trandInt = -randInt\n\t}\n\tindex 
:= randInt % len(sentManager.sentences)\n\treturn sentManager.sentences[index]\n}\n\nfunc storeVideo(userID string, metadata map[string][]string, videoReadFile *multipart.File) {\n\tuserDirectory := \"../data/\" + userID\n\tcheckAndCreateDir(userDirectory)\n\tsentenceHash := sha256.Sum256([]byte(metadata[\"sentence\"][0]))\n\tvideoDirectory := userDirectory + \"/\" + hex.EncodeToString(sentenceHash[:])\n\tcheckAndCreateDir(videoDirectory)\n\tfiles, _ := ioutil.ReadDir(videoDirectory)\n\tcopyNum := strconv.Itoa((len(files) / 2) + 1)\n\tvideoFileName := videoDirectory + \"/\" + copyNum + \".mov\"\n\tvideoWriteFile, err := os.OpenFile(videoFileName, os.O_RDWR|os.O_CREATE, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := videoWriteFile.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\ttransferFile(videoReadFile, videoWriteFile)\n\tmetadataFileName := videoDirectory + \"/\" + \"metadata-\" + copyNum + \".txt\"\n\tmetadataWriteFile, err := os.OpenFile(metadataFileName, os.O_RDWR|os.O_CREATE, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := metadataWriteFile.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tfor k, v := range metadata {\n\t\t_, err = metadataWriteFile.Write([]byte(k + \" : \" + v[0] + \"\\n\"))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc transferFile(sourceFile *multipart.File, targetFile *os.File) {\n\tr := bufio.NewReader(*sourceFile)\n\tw := bufio.NewWriter(targetFile)\n\tbuf := make([]byte, 1024)\n\tfor {\n\t\tn, err := r.Read(buf)\n\t\tif n == 0 {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\t_, err = w.Write(buf[:n])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\tif err := w.Flush(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc checkAndCreateDir(path string) {\n\tinfo, err := os.Stat(path)\n\tif os.IsNotExist(err) {\n\t\tos.Mkdir(path, fs.ModeDir)\n\t\terr = nil\n\t}\n\tif info != 
nil && !info.IsDir() {\n\t\tpanic(\"File \" + path + \" exists and is not a directory\")\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}" }, { "alpha_fraction": 0.7009345889091492, "alphanum_fraction": 0.7028037309646606, "avg_line_length": 27.1842098236084, "blob_id": "c259840fae1898ec38f9f36874e9ac9b78b15cd8", "content_id": "295661a80eb5351ad2405020165fec6904f97477", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 99, "num_lines": 38, "path": "/Sentences/Generation/videolist.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# This file provides a list of all of the videoids that are in the trending videos dataset. If the\n# file has not already been created, this script will create 'videoids.json', which stores the list\n# as a json array for easy access.\n\nimport csv\nimport os\nimport json\n\nimport gen_consts as consts\n\n# If the file hasn't been made yet, write everything to the file so that we don't have to load the\n# csv every single time.\nif not os.path.exists(consts.video_id_list):\n\t# We make a set to ensure that we don't count the duplicates (there are many in the dataset)\n\tvideo_ids = []\n\tvideo_set = set()\n\n\twith open(consts.videos_dataset) as f:\n\t\tcsv_reader = csv.reader(f, delimiter=',')\n\n\t\tfirst_line = True\n\t\tfor row in csv_reader:\n\t\t\tif first_line:\n\t\t\t\tfirst_line = False\n\t\t\telse:\n\t\t\t\tvideo = row[0]\n\t\t\t\tif video not in video_set:\n\t\t\t\t\tvideo_ids.append(video)\n\t\t\t\t\tvideo_set.add(video)\n\n\n\t# store all of the videos\n\twith open(consts.video_id_list, 'w+') as f:\n\t\tjson.dump(video_ids, f, indent=4)\n\ndef video_ids():\n\twith open(consts.video_id_list) as f:\n\t\treturn json.load(f)" }, { "alpha_fraction": 0.6504566669464111, "alphanum_fraction": 0.6590965390205383, "avg_line_length": 32.48760223388672, "blob_id": 
"d775016f631eab4ffc6286ae9fa4b40b2f67545d", "content_id": "3dab9793d79eb231a6249caacc24569cb7439672", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4051, "license_type": "permissive", "max_line_length": 110, "num_lines": 121, "path": "/helper.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# an assorted collection of things that are referenced by 'main.py', but do not need to be defined\n# there.\nimport random\nimport json\nimport os\nfrom os import path\nimport timeit\n\nimport tensorflow as tf\nimport cv2\n\nimport consts\nfrom Sentences import sets\n\n# the set of videos we'll pull from\nset_of_videos = []\nwith open(consts.namespace_path) as f:\n namespace = json.load(f)\n for video_id, n in namespace.items():\n for i in range(0, n):\n set_of_videos.append((int(video_id), i))\n\ndef get_images(video, n):\n subdir = path.join(consts.data_dir, \"{}-{}\".format(video, n))\n\n total = path.join(subdir, consts.num_total_file)\n if not path.exists(total):\n raise Exception(\"No total file found ('{}' does not exist)\".format(total))\n with open(total) as f:\n size = int(f.read().strip())\n\n left_imgs = []\n right_imgs = []\n\n for i in range(0, size):\n left_path = path.join(subdir, consts.left_eye_format.format(i))\n right_path = path.join(subdir, consts.right_eye_format.format(i))\n\n l_img = cv2.imread(left_path)\n r_img = cv2.imread(right_path)\n\n if l_img is None:\n raise Exception(\"File '{}' not found.\".format(left_path))\n if r_img is None:\n raise Exception(\"File '{}' not found.\".format(right_path))\n \n left_imgs.append(tf.convert_to_tensor(l_img, dtype=tf.float32))\n right_imgs.append(tf.convert_to_tensor(r_img, dtype=tf.float32))\n\n # left_imgs.append(tf.convert_to_tensor(cv2.imread(left_path), dtype=tf.float32))\n # right_imgs.append(tf.convert_to_tensor(cv2.imread(right_path), dtype=tf.float32))\n\n return (tf.convert_to_tensor(left_imgs), 
tf.convert_to_tensor(right_imgs))\n\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_)\n\ndef index_list():\n l = set_of_videos.copy()\n random.shuffle(l)\n return l\n\ndef trial_with_random(img_process, dsbrnn, speller):\n # this is just an arbitrarily chosen number\n seq_len = 32\n input_shape = [consts.output_height, consts.output_width, 3]\n random_left = tf.random.uniform([seq_len] + input_shape)\n random_right = tf.random.uniform([seq_len] + input_shape)\n\n _, _ = speller(dsbrnn([img_process(random_left), img_process(random_right)]), None)\n\n# The following section is taken directly form the tensorflow 2.0 tutorial:\n# https://www.tensorflow.org/beta/tutorials/text/nmt_with_attention#define_the_optimizer_and_the_loss_function\noptimizer = tf.keras.optimizers.Adam()\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')\n\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_)\n\n# inputs is a tuple of two tensors, left eye and right eye\ndef train_step(img_process, dsbrnn, speller, inputs, targets):\n loss = 0\n\n with tf.GradientTape() as tape:\n values = dsbrnn([img_process(inputs[0]), img_process(inputs[1])])\n\n # teacher forcing, so we'll disregard the previous\n speller_input = tf.expand_dims(tf.one_hot(targets[0], len(sets.output_set), dtype=tf.float32), axis=0)\n for t in targets[1:]:\n # returns output, rnn_state\n predictions, _ = speller(values, speller_input)\n\n loss += loss_function(tf.expand_dims(t, axis=0), predictions)\n\n # teacher forcing\n speller_input = tf.expand_dims(tf.one_hot(t, len(sets.output_set), dtype=tf.float32), axis=0)\n\n batch_loss = loss / (len(targets) - 1)\n \n 
variables = img_process.trainable_variables + dsbrnn.trainable_variables + speller.trainable_variables\n\n gradients = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(gradients, variables))\n\n speller.reset()\n\n return batch_loss" }, { "alpha_fraction": 0.6257088780403137, "alphanum_fraction": 0.6658790111541748, "avg_line_length": 27.608108520507812, "blob_id": "00e718acba12b14e7b29d5d8547756c6dd9c3591", "content_id": "ab3804aac571c7b3031935443391c9378167e4fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2116, "license_type": "permissive", "max_line_length": 184, "num_lines": 74, "path": "/Server/src/server_test.go", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"net/http\"\n\t\"io\"\n\t\"mime/multipart\"\n\t\"bytes\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\nfunc TestGetSentence(*testing.T) {\n\tresp, err := http.Get(\"http://localhost:8080/sentence/get\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbytes := make([]byte, 256)\n\t_, err = resp.Body.Read(bytes)\n\tif err != nil && err != io.EOF {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Sentence recieved: \" + string(bytes))\n}\n\nfunc TestStoreVideo(*testing.T) {\n\ttestFile, err := os.Open(\"../testing/test.mov\")\n\tif err != nil && os.IsNotExist(err) {\n\t\tpanic(\"Please supply a test video file 'test.mov' in ../testing\")\n\t}\n\tdefer func() {\n\t\terr := testFile.Close()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tvar b bytes.Buffer\n\tw := multipart.NewWriter(&b)\n\tfw, err := w.CreateFormField(\"userID\")\n\tif _, err = io.Copy(fw, strings.NewReader(\"testID\")); err != nil {\n\t\tpanic(err)\n\t}\n\tfw, err = w.CreateFormField(\"sentence\")\n\tif _, err = io.Copy(fw, strings.NewReader(\"This is a test sentence.\")); err != nil {\n\t\tpanic(err)\n\t}\n\tfw, err = w.CreateFormFile(\"videoFile\", testFile.Name())\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\tif _, err = io.Copy(fw, testFile); err != nil {\n\t\tpanic(err)\n\t}\n\tw.Close()\n\treq, err := http.NewRequest(\"POST\", \"http://localhost:8080/data/upload\", &b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\tclient := &http.Client{}\n\tclient.Do(req)\n\tfmt.Println(\"Sent test data --- checking for integrity\")\n\tfmt.Println(\"Difference between expected metadata file and actual metadata file:\")\n\tmetadataCheckCmd := exec.Command(\"diff\", \"-a\", \"--text\", \"../testing/metadata-1.txt\", \"../data/testID/33eb0576bd8ecb5317c08dfaa4c3c2853ac740b23c248ef65959c4fe12eca4cf/metadata-1.txt\")\n\tmetadataCheckCmd.Run()\n\tfmt.Println(\"Difference between expected video file and actual video file:\")\n\tvideoCheckCmd := exec.Command(\"diff\", \"-a\", \"--text\", \"../testing/test.mov\", \"../data/testID/33eb0576bd8ecb5317c08dfaa4c3c2853ac740b23c248ef65959c4fe12eca4cf/test.mov\")\n\tvideoCheckCmd.Run()\n\tfmt.Println(\"Cleaning test directories\")\n\trmAllCmd := exec.Command(\"rm\", \"../data/testID\", \"-r\")\n\trmAllCmd.Run()\n}" }, { "alpha_fraction": 0.6684170961380005, "alphanum_fraction": 0.6856713891029358, "avg_line_length": 24.653846740722656, "blob_id": "6d42f65e7e1ee617657fb0cf6f23baac361db5ef", "content_id": "c0a0f71c6cc13234249cc64af3b0fc58a12779a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1333, "license_type": "permissive", "max_line_length": 96, "num_lines": 52, "path": "/Server/src/handlers.go", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"net/http\"\n)\n\nfunc (sentManager *SentenceManager) sentenceGetHandler(w http.ResponseWriter, r *http.Request) {\n\tsentence := sentManager.getSentence()\n\tw.Write([]byte(sentence))\n}\n\nfunc videoUploadHandler(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseMultipartForm((1 << 20) * 100)\n\tif err 
!= nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Failed to parse form: \" + err.Error()))\n\t\treturn\n\t}\n\tstringMap := r.MultipartForm.Value\n\tfileMap := r.MultipartForm.File\n\tuserID, ok := stringMap[\"userID\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Field 'userID' not included in video upload\"))\n\t\treturn\n\t}\n\t_, ok = stringMap[\"sentence\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Field 'sentence' not included in video upload\"))\n\t\treturn\n\t}\n\tvideoFileHeader, ok := fileMap[\"videoFile\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Field 'videoFile' not included in video upload\"))\n\t\treturn\n\t}\n\tvideoFile, err := videoFileHeader[0].Open()\n\tdefer func() {\n\t\tif err := videoFile.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Video file failed to open\"))\n\t\treturn\n\t}\n\tstoreVideo(userID[0], stringMap, &videoFile)\n\treturn\n}" }, { "alpha_fraction": 0.7703900933265686, "alphanum_fraction": 0.771276593208313, "avg_line_length": 55.45000076293945, "blob_id": "c3e2932b9b8ea223bc9831ce8d9619717a3d7c92", "content_id": "5250ff2678f22d0b299da5c2981d40d85e5bea87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1128, "license_type": "permissive", "max_line_length": 98, "num_lines": 20, "path": "/Data (deprecated)/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Data\n\n**NOTE: DEPRECATED.** Below is the purpose it previously served.\n\nThis is the directory in which we store all of the input images to our model. Each directory is\ngiven a name corresponding to the video file in 'SourceData' with the same name. 
All of the files\nin this directory are left ignored by git, with the exception of this README -- backing up the\ndata itself to github is unecessary.\n\nEach directory here is formatted by '$ID-$N', where $ID is the numerical ID corresponding to the\nsentence it represents, and $N is a unique non-negative number used to prevent naming conflicts.\n$N is simply the number of videos for the sentence given by $ID that are already present in the\ndataset before this video has been added.\n\nWithin each directory, there are two sets of files: images cropped to the left eye are written as\n'l_$F.jpg', where $F is the frame of the video, starting at 0. Likewise, the images corresponding\nto the right eye are written as 'r_$F.jpg'. There is also a single file - 'total' - that indicates\nthe number of frames.\n\nThe format of every part of this directory will likely change in the future." }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 26, "blob_id": "471481cd2bbd0dfe2dea8aeb8ec7c82a28c20cb7", "content_id": "8da36c573fae87e3477d594ce498888d5bbd23da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "permissive", "max_line_length": 26, "num_lines": 1, "path": "/Model/__init__.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "__all__ = ['model_helper']" }, { "alpha_fraction": 0.703984797000885, "alphanum_fraction": 0.7077798843383789, "avg_line_length": 30.058822631835938, "blob_id": "dce23f494760d6187ca725df365b35f70fa4764f", "content_id": "478e98cb8b84cd09dd1964a96948acfd16cc0924", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "permissive", "max_line_length": 98, "num_lines": 17, "path": "/Sentences/add_sentence_numbers_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Adds index numbers to every 
sentence found in `new_sentences.txt` and saves the file as\n# `sentences_annotated.txt`\n#\n# Note: If there is a larger stream of incoming sentences, we can change this to keep track of the\n# current number so that we start this with that and append to that file.\n\nimport os\n\nsentences = []\n\nwith open(\"sentences.txt\") as f:\n\tfor line in f:\n\t\tsentences.append(line.strip())\n\nwith open(\"sentences_annotated.txt\", \"a+\") as f:\n\tfor i, s in enumerate(sentences):\n\t\tf.write(\"{:<3}: {}\\n\".format(i+1, s))" }, { "alpha_fraction": 0.6239669322967529, "alphanum_fraction": 0.6549586653709412, "avg_line_length": 30.225807189941406, "blob_id": "faf6acd6a4ad581c39caa6e0642068da2ee0abb8", "content_id": "70b59e119d5a13a290d7696dea1317555c5163dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2904, "license_type": "permissive", "max_line_length": 95, "num_lines": 93, "path": "/main_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import time\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom Model.model import *\nimport helper\nimport consts\nimport Data\nfrom Sentences import sets\n\nprint('Creating model...')\n\n# Currently [20, 30, 3]\ninput_shape = [consts.output_height, consts.output_width, 3]\n\n# Note: We are intentionally not using pooling here -- see the results of this paper:\n# https://arxiv.org/pdf/1412.6806.pdf\n#\n# A lot of this is just guess-work. 
This is one of the more tweak-able parts of the model.\n# init = keras.initializers.he_normal\nimg_process = keras.Sequential([\n layers.Conv2D(15, (7, 7), activation='relu', input_shape=input_shape),\n # shape = (14, 24, 30)\n layers.Conv2D(15, (3, 3), activation='relu'),\n # shape = (12, 22, 15)\n # pool\n layers.Conv2D(15, (3, 3), strides=(2, 2), activation='relu'),\n # shape = (5, 10, 15)\n layers.Conv2D(30, (3, 3), activation='relu'),\n # shape = (3, 8, 30)\n])\n\n# print('Image processor done. Summary:')\n# img_process.summary()\n\n# create our stacked birnn layers\n# in this case, the input shape is (3, 8, 30)\ndsbrnn_input_shape = img_process.layers[-1].output_shape[1:]\ndsbrnn = DeepStackedBiRNNBuilder(dsbrnn_input_shape)\ndsbrnn.add(40, output_pooling=2).repeat(2)\ndsbrnn.add(20)\ndsbrnn = dsbrnn.build()\n\n# print('DSBRNN done. Summary:')\n# dsbrnn.summary()\n\nattn_units = 10\nattn_conv_features = 4\nattn_window = (0, 4)\nrnn_units = 20\noutput_size = consts.final_output_size\nspeller = Speller(attn_units, attn_conv_features, attn_window, rnn_units, output_size)\n\n# test a sample input to get speller summary\nhelper.trial_with_random(img_process, dsbrnn, speller)\n\nprint('Parameter counts:')\nprint(\"\\timg_process: {}\".format(img_process.count_params()))\nprint(\"\\tdsbrnn: {}\".format(dsbrnn.count_params()))\nprint(\"\\tspeller: {}\".format(speller.count_params()))\n\n# establish checkpoint\ncheckpoint = tf.train.Checkpoint(optimizer=helper.optimizer,\n img_process=img_process,\n dsbrnn=dsbrnn,\n speller=speller)\n\n# train\ndset = helper.index_list()\n\nEPOCHS = 10\nfor epoch in range(EPOCHS):\n start = time.time()\n\n total_loss = 0\n for batch, (video_id, n) in enumerate(dset):\n img_seq = helper.get_images(video_id, n)\n targets = sets.as_labels(sets.from_id(video_id))\n\n batch_loss = helper.train_step(img_process, dsbrnn, speller, img_seq, targets)\n total_loss += batch_loss\n\n if batch % 10 == 0:\n print('Epoch {} Batch {} Loss 
{:.4f}'.format(epoch + 1, batch, batch_loss.numpy()))\n\n # save a checkpoint every other epoch\n if (epoch + 1) % 2 == 0:\n checkpoint.save(file_prefix = consts.checkpoint_prefix)\n\n print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / len(dset)))\n print('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))\n" }, { "alpha_fraction": 0.6861198544502258, "alphanum_fraction": 0.7263407111167908, "avg_line_length": 27.177778244018555, "blob_id": "c74a3693f7bb8cb0b6dd9deb458200088bbef1af", "content_id": "c34d19fa5e1848d847062faadf1f37f60c363a29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "permissive", "max_line_length": 86, "num_lines": 45, "path": "/Model/test_model.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom model import *\n\nprint('Testing a sample model')\n\n# define the necessary input\nsample_input = tf.random.uniform([60, 100, 100, 3])\ninput_shape = sample_input.shape[1:]\nseq_len = sample_input.shape[0]\n\n# base convolutional operations\nvideo_stream = keras.Input(shape=input_shape)\nl = layers.AveragePooling2D(pool_size=(2,2))(video_stream)\nl = layers.Conv2D(10, (5, 5), activation='relu')(l)\nl = layers.MaxPooling2D((2, 2))(l)\nl = layers.Conv2D(20, (3, 3), activation='relu')(l)\nl = layers.MaxPooling2D((2, 2))(l)\nl = layers.Conv2D(20, (3, 3))(l)\nimg_converter = keras.Model(inputs=video_stream, outputs=l)\n\nprint('image converter summary:')\nimg_converter.summary()\n\ndsbrnn_input_shape = l.shape[1:]\ndsbrnn = DeepStackedBiRNNBuilder(dsbrnn_input_shape)\ndsbrnn.add(40, output_pooling=2).repeat(2)\ndsbrnn.add(20)\ndsbrnn = dsbrnn.build()\n\nprint('dsbrnn summary')\ndsbrnn.summary()\n\nattn_units = 10\nattn_conv_features = 4\nattn_window = (0, 4)\nrnn_units = 20\noutput_size = 30\nspeller = Speller(attn_units, 
attn_conv_features, attn_window, rnn_units, output_size)\n\noutput, _ = speller(dsbrnn(img_converter(sample_input)), None)\nprint('sample output:', output)\nprint('done testing!')\n" }, { "alpha_fraction": 0.7792165875434875, "alphanum_fraction": 0.7798640131950378, "avg_line_length": 60.178218841552734, "blob_id": "44d906848db2e166bedbf708847c730e602771e7", "content_id": "2d0ad0ac2a023e993670068466c8110bd0888cd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6178, "license_type": "permissive", "max_line_length": 146, "num_lines": 101, "path": "/roadmap.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# TODO: Make a proper, separate README\n\nThis isn't it - we need one for newcomers to the project.\n\n# Project Roadmap\n\nThis files serves to document everything that has already been done in the project, along with\neverything that still needs to be done.\n\nThe project can be broken down into a few key systems: [Data Collection](#data-collection),\n[Model Building](#model-building), and [Final Usage](#final-usage).\n\n### Data Collection (Python, Go, Swift)\n\nThe end goal of our data collection is to provide a set of videos to use as training and test and\ntest data in constructing our model. This process can be further divided into sub-processes:\nSentence Generation, Dispatch, Recording, and Receiving. The specific data we'd like are videos of\nindividuals swiping certain sentences, as they would if they were actually using the system.\n\nSentence Generation is done in [Sentences/Generation](Sentences/Generation), giving the results to\nSentences/sentences.txt. Dispatching the sentences to the [data collection app](kaolin-eyeswipe-recorder)\nfor recording is done through the server, written in Go ([link](Server)). 
After the sentence has\nbeen swiped, the app uploads the video file to the server, along with all of the associated\nmetadata in a separate file.\n\n##### Sentence Generation (Done; Python)\n\nThe sentence-generation portion of the project is likely in its final state. It uses a set of of\nhand-written YouTube subtitles downloaded with the tool [`youtube-dl`](https://ytdl-org.github.io/youtube-dl/index.html).\nThe subtitles are parsed and sentences extracted from them using the lexicon created and stored in [`lexicon.txt`](Sentences/Lexicon/lexicon.txt).\nThis final result is used by the server.\n\n##### App (Swift)\n\nSome work has been done on the app. A friend of the project, Kaolin Fire ([1](https://github.com/kaolin),\n[2](http://erif.org/)), has kindly written a large chunk of the data-collection app. All of the\nassociated files have been placed into [kaolin-eyeswipe-recorder](kaolin-eyeswipe-recorder). The\namount of work left to do is unknown, but the primary things will be:\n* Ensuring start/stop of video recording is aligned with sentences\n * Note: There are many options for ensuring this -- long blink to start/stop is one that comes\n to mind, but there are sure to be others. There are performance implications for running\n facial landmark detectors while recording video but it *might* be fine.\n * IF we are already using facial landmarks, we can also store that data and send the\n frame-by-frame locations as metadata along with it to train the model. There are a couple\n libraries that work for this, the most notable being ML Kit, by Google. It works on iOS and\n they provide a facial landmark detector built in.\n* Fetching sentences from the server\n* Uploading video files and associated metadata (device name/type (screen size), orientation\n (camera on left vs. right), facial landmarks - if tracking that)\n\n##### Server (Go)\n\nAbout half of the functionality of the server has been completed. 
Dispatching sentences to users\nthat request them has been implemented (there's no documentation/specification, though), while\nreceiving and processing (formatting) incoming videos has not yet been done. Notes on those are\nlisted:\n* Receiving videos\n* Processing / Formatting videos\n * Some work on this has been done in a different context, before we decided to make a\n data-collection app. That work can be found in [Convert](Convert (deprecated)) - it is\n mostly to do with standardizing videos and breaking them apart into discrete frames.\n\nThe primary goal of the server is to gather the data necessary for the final model building.\n\n### Model Building (Python - TF 2.0)\n\nSome work has been done on this already - including a handful of research. In addition to\neverything that's been written in the files in the [Model](Model) subdirectory, there are a few\nthoughts outlined below. For reference, the generic idea outlined there consists of three layers:\na convolutional portion that operates on individual images; deep-stacked bidirectional LSTMs\n(yeilding a variable-size hidden layer); and a final attention-based decoder.\n\nThe primary file for actually training a model is `main_script.py`, which uses `helper.py` and\n`consts.py` extensively.\n\n###### Thoughts\n* Beam Search is a critical component that should be added (the implementation may prove difficult\n though)\n* With beam search, it may not be necessary to use bidirectional LSTMs. 
There are nice performance\n benefits to simplifying, and it may allow on-the-fly interpretation in a similar fashion to how\n Google's Google Assistant revises its interpretation as it goes - it also sometimes backtracks\n when new information comes in\n* Another idea: It may be possible to replace the DSBRNN layer with something akin to google's\n WaveNet - incorporating several previous frames in increasingly large time horizons.\n* Currently, the initial convolutional layers operate on a single frame at a time, but it may be\n advantageous to convolve on multiple frames at a time - either overlapping regions or collapsing.\n* The encoder/decoder architecture may also not be the best - transformers have been all the rage\n lately, so they may work better.\n * It's also entirely possible to take some other ideas from it - like multi-head attention\n instead of just single-head.\n* **Note**: While possible to avoid this, it's very probable that the best solution will involve\n some form of temporal pooling and attention over a hidden layer. No designs have been iterated\n though, so everything mentioned here is purely theoretical.\n\n### Final Usage (Swift, Python?)\n\nThe final usage of the EyeSwipe will be as an iPad app, using an embedded model that may or may not\ndo some additional training with the user. The model could also be converted to an ML Core model\n(which seems possible) in order to get the advantage of running Apple's software on their hardware.\nWe have not done speed comparisons for TensorFlow Lite and ML Core. TensorFlow Mobile is also\npossible, but Google recommends TensorFlow Lite as it is what they are focusing on moving forward." 
}, { "alpha_fraction": 0.7044107913970947, "alphanum_fraction": 0.7129690647125244, "avg_line_length": 28.211538314819336, "blob_id": "560b432aa7375ade8cc557f4553b2b56be2a17bf", "content_id": "2067b8c4b01550b20501f0f912c120772347234a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "permissive", "max_line_length": 133, "num_lines": 52, "path": "/Sentences/Generation/get_subtitles_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "import csv\nimport os\nimport inspect\nimport time\nimport signal\n\nimport videolist as vlist\nimport gen_consts as consts\n\nvideo_ids = vlist.video_ids()\n\n# The following line will change every time the script is interrupted to reflect where we need to\n# pick up on.\nstartAt = 6350\nline_of_start = inspect.stack()[0].lineno - 1\n\nindex = startAt\n\nif not os.path.exists(consts.subtitles_dir):\n\tos.makedirs(consts.subtitles_dir)\n\n# when formatted, produces something like:\n# \"youtube-dl --write-sub --sub-lang en --skip-download -o youtube-subtitles/kfPNxNIDHrA https://www.youtube.com/watch?v=kfPNxNIDHrA\"\ncmdBase = \"youtube-dl --write-sub --sub-lang en --skip-download -o {}{}{} https://www.youtube.com/watch?v={}\"\ntry:\n\tfor video in video_ids[startAt:]:\n\t\t# sleep so that we can allow ^C\n\t\ttime.sleep(0.3)\n\n\t\tprint(\"Video: {}\".format(index))\n\t\tcmd = cmdBase.format(consts.subtitles_dir, os.path.sep, video, video)\n\t\tos.system(cmd)\n\n\t\tindex += 1\n\n\t# because of the 'finally' block, this indicates that we're FULLY done\n\tindex += 1\nfinally:\n\t# modify this file's source so that we can change `startAt` to reflect the new place we've gotten to.\n\n\t# disable keyboard interrupt temporarily\n\told_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n\twith open(__file__) as f:\n\t\tsource_lines = f.readlines()\n\t# line numbers start at 1, not zero\n\tsource_lines[line_of_start -1] = 
'startAt = {}\\n'.format(index - 1)\n\twith open(__file__, 'w') as f:\n\t\tfor l in source_lines:\n\t\t\tf.write(l)\n\n\tsignal.signal(signal.SIGINT, old_handler)\n" }, { "alpha_fraction": 0.7525706887245178, "alphanum_fraction": 0.76606684923172, "avg_line_length": 33.57777786254883, "blob_id": "195279012a2d35fbb44d3b2529396c04e976256a", "content_id": "0f334a573a64301a6f4958d73fee532f8cd1580c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1560, "license_type": "permissive", "max_line_length": 216, "num_lines": 45, "path": "/kaolin-eyeswipe-recorder/KBDView.swift", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "//\n// KBDView.swift\n// EyeSwipeRecorder\n//\n\n// see also all this cool shit: https://developer.apple.com/documentation/uikit/uikeyinput\n// maybe differently interesting: https://developer.apple.com/library/archive/documentation/StringsTextFonts/Conceptual/TextAndWebiPhoneOS/KeyboardManagement/KeyboardManagement.html\n/*\n// https://stackoverflow.com/questions/13005969/it-is-possible-to-show-keyboard-without-using-uitextfield-and-uitextview-iphone\nFor anyone, who wants to show keyboard without UITextField/UITextView for some reasons, could easily extend some view, which requires text input by implementing UIKeyInput protocol and canBecomeFirstResponder method.\nAnd if you override @property (nullable, readwrite, strong) UIView *inputAccessoryView; in the new class you can assign an inputAccessoryView to it too. 
โ€“ Alexandre G Mar 15 '16 at 4:52\nYour control that implements UIKeyInput can also implement UITextInputTraits if you want to customize the keyboard :) โ€“ Nathan Kot Apr 8 '16 at 2:13\n*/\n\nimport Foundation\nimport UIKit\n\nclass KBDView: UIView, UIKeyInput, UITextInputTraits {\n\tvar hasText: Bool = false\n\t\n\tfunc insertText(_ text: String) {\n\t\tif text == \" \" {\n\t\t\tNotificationCenter.default.post(name: .eyeswipeOnSpacebar, object: nil)\n\t\t}\n\t}\n\t\n\tfunc deleteBackward() {\n\t\t// pass :)\n\t\tNotificationCenter.default.post(name: .eyeswipeCancel, object: nil)\n\t}\n\t\n\toverride var canBecomeFirstResponder: Bool {\n\t\treturn true\n\t}\n\t\n\tvar autocorrectionType: UITextAutocorrectionType {\n\t\tget {\n\t\t\treturn .no\n\t\t}\n\t\tset {\n\t\t}\n\t}\n\n\t// class definition goes here\n}\n" }, { "alpha_fraction": 0.7796609997749329, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 41.28571319580078, "blob_id": "a63f2d8d0d21df24cb808b9fc69cc0e49efc03a1", "content_id": "298c1d0f6a54a7384f911ec6b0dfa6d175e336c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 295, "license_type": "permissive", "max_line_length": 96, "num_lines": 7, "path": "/SourceData (deprecated)/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# SourceData\n\n**NOTE: DEPRECATED.** Below is the purpose it previously served.\n\nThis directory holds all of the original video files that have been used to create our training\ndata. The training data can optionally be rebuilt from the files here. For more information, see\n[Convert](../Convert)." 
}, { "alpha_fraction": 0.7664029598236084, "alphanum_fraction": 0.7664029598236084, "avg_line_length": 55.578948974609375, "blob_id": "6676ea86f0d915a5bd0efa2b8f5ebbe42a911de1", "content_id": "ae258adf2880f5d213a06ac2bbf74d720b075afd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2149, "license_type": "permissive", "max_line_length": 111, "num_lines": 38, "path": "/Sentences/Generation/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Generation\n\nThis directory houses all of the scripts associated with generating the sentences to supply as\nour training data. These scripts don't necessarily need to be run many times, but will be in cases\nwhere components of the project are being re-built. The general pipeline is fairly straightforward.\n\nWe use a database of trending YouTube videos to get a list of videos to download subtitles from.\nThe tool 'youtube-dl' downloads the english subtitles (note: only those written by a human), which\nwe then parse into sentences.\n\n### Subdirectories\n\n* **youtube-subtitles**: Ingored by git. Stores all of the subtitles once they have been\n\tdownloaded.\n\n### Files\n\n* [gen_consts.py](gen_consts.py): Contains various constants (mostly filepaths) used by these\n\tscripts so that they can be modified from one central source.\n* [get_random_sentences_script.py](get_random_sentences_script.py): Provides a random subset of the\n\tsentences from 'Sentences/sentences.txt'\n* [videolist.py](videolist.py): Provides 'videolist.video_ids()', which returns a list of all of\n\tthe video IDs.\n* [get_subtitles_script.py](get_subtitles_script.py): This script fetches the manually-written\n\tsubtitles from youtube for each video. Note: To work properly, it may need to be reset (as it\n\tedits its source). 
It can be exited at any time (I find spamming ctrl+C to be sometimes\n\tnecessary), and it will ensure that it updates the starting index so the next time it's run, it\n\twill pick up where it left off.\n* [make_sentences.py](make_sentences.py): Parses the subtitles collected in 'youtube-subtitles' to\n\tgenerate a list of sentences, which is output to 'Sentences/sentences.txt'.\n* [videoids.json](videoids.json): Because it's a small file and requires USvideos.csv to generate,\n\tthis file is kept here. It's simply the list of YouTube video IDs\n\n##### Not present\n\n* **USvideos.csv**: A dataset of trending YouTube videos in the United States. One of the files\n\tpresent in the 'youtube-new' dataset, which can be found [here](https://www.kaggle.com/datasnaek/youtube-new).\n\t* Note: The file is also included in [Sentences/Sources](../Sources) as `USvideos.zip`." }, { "alpha_fraction": 0.7742234468460083, "alphanum_fraction": 0.7760778665542603, "avg_line_length": 54.33333206176758, "blob_id": "9366d0052914e3181e4b82fe739094cb50d7d99a", "content_id": "2b34229d768ff025ee7b5117f7a172306701b083", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2157, "license_type": "permissive", "max_line_length": 99, "num_lines": 39, "path": "/Sentences/README.md", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Sentences\n\nThis directory serves as a workspace for two main functions. The primary use is the generation of\nsentences from YouTube subtitles, to give as labels for our data-collectors. This process will\nlikely change soon, as we are planning to make a dedicated iOS app for data collection. The\nassociated resources for this can be found in [Sentences/Generation](Sentences/Generation).\n\nThe secondary use is the creation of a lexicon of \\~14k words. 
The lexicon currently only serves to\nhelp in the generation of sentences, but it will eventually be used to inform our language model.\n\n### Subdirectories\n\n* [Generation](Generation): A set of scripts and datasets for generating sentences.\n* [Lexicon](Lexicon): Scripts and datasets for generating a (\\~14k word) lexicon.\n* [Sources](Sources): All of the original source files for the datasets used in sentence/lexicon\n\tgeneration.\n\n### Files\n\n* [add_sentence_numbers_script.py](add_sentence_numbers_script.py): Will be deprecated soon.\n\tCreates 'sentences_annotated.txt', which gives the sentences found in 'sentences.txt' unique\n\tnumerical IDs.\n* [make_sentence_dict_script.py](make_sentence_dict_script.py): Constructs 'sentence_dict.json'\n\tfrom the list of sentences in order to provide a way to go from sentence ID back to the text of\n\tthe sentence.\n* [sets.py](sets.py): Various operations to do with these lists of sentences, mainly for use in\n\ttop-level scripts.\n* [sentences.txt](sentences.txt): A list of sentences. For the meantime, this is manually vetted\n\tfrom the larger list found in [Sentences/Generation](Sentences/Generation).\n* [sentences_annotated.txt](sentences_annotated.txt): Will be deprecated soon. The list of\n\tsentences, where each one is prefixed by its ID number.\n* [sentence_consts.py](sentence_consts.py): Stores various constants (primarily filepaths) used by\n\tscripts in this directory (and its subdirectories).\n\n##### Not present\n\n* **all_sentence_list.txt**: The output file for the list of sentences created by\n\t[Generation/make_sentences.py](Generation/make_sentences.py)\n* **sentence_dict.json**: A dictionary that maps sentence id to sentence text, stored as JSON." 
}, { "alpha_fraction": 0.7383390069007874, "alphanum_fraction": 0.7383390069007874, "avg_line_length": 30.39285659790039, "blob_id": "7eea1356d6f668fbbcb03963d24fc0e81879fb7d", "content_id": "a15e23aedda71895a5b051ab42bf5925521e665d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, "license_type": "permissive", "max_line_length": 97, "num_lines": 28, "path": "/Sentences/Generation/clean_script.py", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": "UTF-8", "text": "# Filters the sentences in 'sentences_all.txt' to only include those that match certain criteria.\n# Those are listed below. The resulting list of sentences is then written to\n# 'Sentences/sentences.txt', the final list.\n\nimport re\nimport sys\n\nimport gen_consts as gconsts\n\n# import 'sentence_consts' from the parent directory\nfrom os.path import abspath, join, dirname\nsys.path.append(abspath(join(dirname(abspath(__file__)), '..')))\nimport sentence_consts as sconsts\n\n# returns whether the sentence should be kept\ndef filter_sentence(s):\n\tnum_words = len(s.split(' '))\n\tif num_words < gconsts.lower_bound or num_words > gconsts.upper_bound:\n\t\treturn False\n\n\treturn True\n\nwith open(gconsts.all_sentences) as f:\n\tsentences = f.read().split('\\n')\n\nas_string = '\\n'.join(filter(filter_sentence, sentences))\nwith open(sconsts.cleaned_sentences, 'w') as f:\n\tf.write(as_string + '\\n')\n" }, { "alpha_fraction": 0.7147600650787354, "alphanum_fraction": 0.7174165844917297, "avg_line_length": 35.06586837768555, "blob_id": "4b3968ef54ce8e12aeaf601b2a733d91655d43e4", "content_id": "9293b10f828c91f2a36b1a536848e5226c181072", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 12050, "license_type": "permissive", "max_line_length": 176, "num_lines": 334, "path": "/kaolin-eyeswipe-recorder/CameraController.swift", "repo_name": "EyeSwipe/EyeSwipe", "src_encoding": 
"UTF-8", "text": "//\n// CameraController.swift\n// EyeSwipeRecorder\n//\n\n// BETTER HANDLING OF THINGS, BUT IOS 13 (internet seems to have been scrubbed of iOS 12 version) โ€”\n// https://developer.apple.com/documentation/avfoundation/cameras_and_media_capture/avcam_building_a_camera_app\n// still has good info\n\nimport AVFoundation\nimport UIKit\nimport Photos\nimport CryptoKit\n\nclass CameraController: NSObject, AVCaptureFileOutputRecordingDelegate {\n\tvar captureSession: AVCaptureSession?\n\t\n\tvar frontCamera: AVCaptureDevice?\n\tvar frontCameraInput: AVCaptureDeviceInput?\n\tvar cancelling:Bool = false\n\t\n\tvar previewLayer: AVCaptureVideoPreviewLayer?\n \n var userEmail : String?\n var serverURL : String?\n var currentSentence : String?\n\t\n\tprivate var movieFileOutput: AVCaptureMovieFileOutput?\n\tprivate var backgroundRecordingID: UIBackgroundTaskIdentifier?\n\tprivate let sessionQueue = DispatchQueue(label: \"session queue\") // Communicate with the session and other session objects on this queue.\n \n\tfunc displayPreview(on view: UIView) throws {\n\t\tguard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }\n\t\t\n\t\tself.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)\n\t\tself.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill\n\t\tself.previewLayer?.connection?.videoOrientation = .portrait\n\t\t\n\t\tview.layer.insertSublayer(self.previewLayer!, at: 0)\n\t\t//self.previewLayer?.frame = view.frame\n\t}\n\t\n\t// from AVCam example, bits and hacks\n\t\n\tfunc completeRecording() {\n\t\tmovieFileOutput?.stopRecording()\n\t}\n\t\n\tfunc cancelRecording() {\n\t\tcancelling = true\n\t\tmovieFileOutput?.stopRecording()\n\t\t// TODO: cancel needs more logic strung throughout, remember it's canceling, oh if we cared about threading and race conditions oh my ;)\n\t}\n\t\n\tfunc startRecording(word:String) {\n\t\tguard let movieFileOutput = 
self.movieFileOutput else {\n\t\t\treturn\n\t\t}\n currentSentence = word\n\t\tsessionQueue.async {\n\t\t\tif !movieFileOutput.isRecording {\n\t\t\t\tif UIDevice.current.isMultitaskingSupported {\n\t\t\t\t\t/*\n\t\t\t\t\tSetup background task.\n\t\t\t\t\tThis is needed because the `capture(_:, didFinishRecordingToOutputFileAt:, fromConnections:, error:)`\n\t\t\t\t\tcallback is not received until AVCam returns to the foreground unless you request background execution time.\n\t\t\t\t\tThis also ensures that there will be time to write the file to the photo library when AVCam is backgrounded.\n\t\t\t\t\tTo conclude this background execution, endBackgroundTask(_:) is called in\n\t\t\t\t\t`capture(_:, didFinishRecordingToOutputFileAt:, fromConnections:, error:)` after the recorded file has been saved.\n\t\t\t\t\t*/\n\t\t\t\t\tself.backgroundRecordingID = UIApplication.shared.beginBackgroundTask(expirationHandler: nil)\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Update the orientation on the movie file output video connection before starting recording.\n\t\t\t\tlet movieFileOutputConnection = movieFileOutput.connection(with: .video)\n\t\t\t\t//\t\t\t\t\tmovieFileOutputConnection?.videoOrientation = previewLayer?.orie\n movieFileOutputConnection?.videoOrientation = AVCaptureVideoOrientation.landscapeRight\n\t\t\t\t\n\t\t\t\t/*let availableVideoCodecTypes = movieFileOutput.availableVideoCodecTypes\n\t\t\t\t\n\t\t\t\tif availableVideoCodecTypes.contains(.hevc) {\n\t\t\t\t\tmovieFileOutput.setOutputSettings([AVVideoCodecKey: AVVideoCodecType.hevc], for: movieFileOutputConnection!)\n\t\t\t\t}*/\n\t\t\t\t\n\t\t\t\t// Start recording to a temporary file.\n\t\t\t\tlet outputFileName = word + \"_\" + NSUUID().uuidString\n\t\t\t\tlet outputFilePath = (NSTemporaryDirectory() as NSString).appendingPathComponent((outputFileName as NSString).appendingPathExtension(\"mov\")!)\n\t\t\t\tmovieFileOutput.startRecording(to: URL(fileURLWithPath: outputFilePath), recordingDelegate: self)\n\t\t\t} else 
{\n\t\t\t\tmovieFileOutput.stopRecording()\n\t\t\t}\n\t\t}\n\t}\n\t\n\tfunc fileOutput(_ output: AVCaptureFileOutput, didStartRecordingTo fileURL: URL, from connections: [AVCaptureConnection]) {\n\t\t// Enable the Record button to let the user stop the recording.\n\t\tDispatchQueue.main.async {\n\t\t\t//self.recordButton.isEnabled = true\n\t\t\t//self.recordButton.setTitle(NSLocalizedString(\"Stop\", comment: \"Recording button stop title\"), for: [])\n\t\t\tprint(\"OK THEN RECORDING STARTED\")\n\t\t}\n\t}\n\t\n\tfunc fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {\n\t\t/*\n\t\tNote that currentBackgroundRecordingID is used to end the background task\n\t\tassociated with this recording. This allows a new recording to be started,\n\t\tassociated with a new UIBackgroundTaskIdentifier, once the movie file output's\n\t\t`isRecording` property is back to false โ€” which happens sometime after this method\n\t\treturns.\n\t\t\n\t\tNote: Since we use a unique file path for each recording, a new recording will\n\t\tnot overwrite a recording currently being saved.\n\t\t*/\n\t\tfunc cleanUp() {\n\t\t\tlet path = outputFileURL.path\n\t\t\tif FileManager.default.fileExists(atPath: path) {\n\t\t\t\tdo {\n\t\t\t\t\ttry FileManager.default.removeItem(atPath: path)\n\t\t\t\t} catch {\n\t\t\t\t\tprint(\"Could not remove file at url: \\(outputFileURL)\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tif let currentBackgroundRecordingID = self.backgroundRecordingID {\n\t\t\t\tbackgroundRecordingID = UIBackgroundTaskInvalid\n\t\t\t\tif currentBackgroundRecordingID != UIBackgroundTaskInvalid {\n\t\t\t\t\tUIApplication.shared.endBackgroundTask(currentBackgroundRecordingID)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tNotificationCenter.default.post(name: .eyeswipeVideoComplete, object: nil)\n\n\t\t}\n\t\t\n\t\tvar success = true\n\t\t\n\t\tif error != nil {\n\t\t\tprint(\"Movie file finishing error: \\(String(describing: 
error))\")\n\t\t\tsuccess = (((error! as NSError).userInfo[AVErrorRecordingSuccessfullyFinishedKey] as AnyObject).boolValue)!\n\t\t}\n\t\t\n\t\tif (cancelling) {\n\t\t\tcancelling = false\n\t\t\tcleanUp()\n\t\t} else if success {\n\t\t\t// Check authorization status.\n \n uploadVideo(paramName: \"videoFile\", fileName: \"test\", videoFile: outputFileURL, userEmail: self.userEmail!, serverURL : self.serverURL!, sentence: currentSentence!)\n cleanUp()\n // Below code saves video file to photo library, instead of uploading to server\n\t\t\t/*PHPhotoLibrary.requestAuthorization { status in\n\t\t\t\tif status == .authorized {\n\t\t\t\t\t// Save the movie file to the photo library and cleanup.\n\t\t\t\t\tPHPhotoLibrary.shared().performChanges({\n\t\t\t\t\t\tlet options = PHAssetResourceCreationOptions()\n\t\t\t\t\t\toptions.shouldMoveFile = true\n\t\t\t\t\t\tlet creationRequest = PHAssetCreationRequest.forAsset()\n\t\t\t\t\t\tcreationRequest.addResource(with: .video, fileURL: outputFileURL, options: options)\n\t\t\t\t\t}, completionHandler: { success, error in\n\t\t\t\t\t\tif !success {\n\t\t\t\t\t\t\tprint(\"Could not save movie to photo library: \\(String(describing: error))\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcleanUp()\n\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\tcleanUp()\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcleanUp()*/\n\t\t}\n\t\t\n\t\t// Enable the Camera and Record buttons to let the user switch camera and start another recording.\n\t\tDispatchQueue.main.async {\n\t\t\t// Only enable the ability to change camera if the device has more than one camera.\n\t\t\tprint(\"ALLDONE\")\n\t\t}\n\t}\n\t\n}\n\nfunc uploadVideo(paramName: String, fileName: String, videoFile: URL, userEmail : String, serverURL : String, sentence: String) {\n let url = URL(string: \"http://\\(serverURL):8080/data/upload\")\n let boundary = UUID().uuidString\n\n let session = URLSession.shared\n \n var urlRequest = URLRequest(url: url!)\n urlRequest.httpMethod = \"POST\"\n\n 
urlRequest.setValue(\"multipart/form-data; boundary=\\(boundary)\", forHTTPHeaderField: \"Content-Type\")\n\n var data = Data()\n \n data.append(\"\\r\\n--\\(boundary)\\r\\n\".data(using: .utf8)!)\n data.append(\"Content-Disposition: form-data; name=userID\\r\\n\\r\\n\".data(using: .utf8)!)\n if #available(iOS 13.0, *) {\n let hashed = SHA256.hash(data: Data(userEmail.utf8)).description\n let beginning = hashed.startIndex\n let startIndex = hashed.index(beginning, offsetBy: 17)\n let endIndex = hashed.index(startIndex, offsetBy: 16)\n print(hashed[endIndex..<endIndex])\n data.append(\"\\(hashed[startIndex..<endIndex])\".data(using: .utf8)!)\n } else {\n data.append(\"\\(userEmail)\".data(using: .utf8)!)\n }\n //let hashEmail = hashed!.description\n \n data.append(\"\\r\\n--\\(boundary)\\r\\n\".data(using: .utf8)!)\n data.append(\"Content-Disposition: form-data; name=sentence\\r\\n\\r\\n\".data(using: .utf8)!)\n data.append(\"\\(sentence)\".data(using: .utf8)!)\n\n data.append(\"\\r\\n--\\(boundary)\\r\\n\".data(using: .utf8)!)\n data.append(\"Content-Disposition: form-data; name=\\\"\\(paramName)\\\"; filename=\\\"\\(fileName)\\\"\\r\\n\".data(using: .utf8)!)\n data.append(\"Content-Type: video/mov\\r\\n\\r\\n\".data(using: .utf8)!)\n \n do {\n try data.append(Data(contentsOf: videoFile))\n } catch {\n print(\"Error uploading video file\")\n }\n\n data.append(\"\\r\\n--\\(boundary)--\\r\\n\".data(using: .utf8)!)\n\n // Send a POST request to the URL, with the data we created earlier\n session.uploadTask(with: urlRequest, from: data, completionHandler: { responseData, response, error in\n if error == nil {\n let jsonData = try? JSONSerialization.jsonObject(with: responseData!, options: .allowFragments)\n if let json = jsonData as? 
[String: Any] {\n print(json)\n }\n }\n }).resume()\n}\n\nextension Notification.Name {\n\tstatic let eyeswipeOnSpacebar = Notification.Name(\"eyeswipe-on-spacebar\")\n\tstatic let eyeswipeCancel = Notification.Name(\"eyeswipe-cancel\")\n\tstatic let eyeswipeVideoComplete = Notification.Name(\"eyeswipe-videocomplete\")\n}\n\nextension CameraController {\n\tfunc prepare(completionHandler: @escaping (Error?) -> Void) {\n\t\tfunc createCaptureSession() {\n\t\t\tself.captureSession = AVCaptureSession()\n\t\t}\n\t\t\n\t\tfunc configureCaptureDevices() throws {\n\t\t\t\n\t\t\tlet session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)\n\t\t\t\n\t\t\tlet cameras = session.devices.compactMap { $0 }\n\t\t\tguard !cameras.isEmpty else { throw CameraControllerError.noCamerasAvailable }\n\t\t\t\n\t\t\tfor camera in cameras {\n\t\t\t\tif camera.position == .front {\n\t\t\t\t\tself.frontCamera = camera\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tfunc configureDeviceInputs() throws {\n\t\t\tguard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }\n\t\t\t\n\t\t\tif let frontCamera = self.frontCamera {\n\t\t\t\tself.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)\n\t\t\t\t\n\t\t\t\tif captureSession.canAddInput(self.frontCameraInput!) { captureSession.addInput(self.frontCameraInput!) 
}\n\t\t\t\telse { throw CameraControllerError.inputsAreInvalid }\n\t\t\t\t\n\t\t\t}\n\t\t\t\t\n\t\t\telse { throw CameraControllerError.noCamerasAvailable }\n\t\t}\n\t\t\n\t\tfunc configureOutput() throws {\n\t\t\tguard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }\n\t\t\tlet movieFileOutput = AVCaptureMovieFileOutput()\n\t\t\t\n\t\t\tif captureSession.canAddOutput(movieFileOutput) {\n\t\t\t\tcaptureSession.beginConfiguration()\n\t\t\t\tcaptureSession.addOutput(movieFileOutput)\n\t\t\t\tcaptureSession.sessionPreset = .high\n\t\t\t\tif let connection = movieFileOutput.connection(with: .video) {\n\t\t\t\t\tif connection.isVideoStabilizationSupported {\n\t\t\t\t\t\tconnection.preferredVideoStabilizationMode = .auto\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcaptureSession.commitConfiguration()\n\t\t\t\tself.movieFileOutput = movieFileOutput\n\t\t\t\tcaptureSession.startRunning()\n\t\t\t}\n\t\t}\n\t\t\t\n\t\t\tDispatchQueue(label: \"prepare\").async {\n\t\t\t\tdo {\n\t\t\t\t\tcreateCaptureSession()\n\t\t\t\t\ttry configureCaptureDevices()\n\t\t\t\t\ttry configureDeviceInputs()\n\t\t\t\t\ttry configureOutput()\n\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\tcatch {\n\t\t\t\t\tDispatchQueue.main.async {\n\t\t\t\t\t\tcompletionHandler(error)\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tDispatchQueue.main.async {\n\t\t\t\t\tcompletionHandler(nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n}\n\nextension CameraController {\n\tenum CameraControllerError: Swift.Error {\n\t\tcase captureSessionAlreadyRunning\n\t\tcase captureSessionIsMissing\n\t\tcase inputsAreInvalid\n\t\tcase invalidOperation\n\t\tcase noCamerasAvailable\n\t\tcase unknown\n\t}\n\t\n\tpublic enum CameraPosition {\n\t\tcase front\n\t\tcase rear\n\t}\n}\n" } ]
41
thatCarlyleGuy/bottlerocket
https://github.com/thatCarlyleGuy/bottlerocket
62447dd743405b4eb821841bd3b72fb0e823380b
19a96d1136fb19de8dc0bfdafd649c14a7893656
89b2c142f2b28deef4f8405e1c4b15fedd04f537
refs/heads/master
2023-06-16T15:21:01.227559
2021-07-03T21:35:47
2021-07-03T21:35:47
382,711,476
0
3
null
null
null
null
null
[ { "alpha_fraction": 0.49006420373916626, "alphanum_fraction": 0.49678996205329895, "avg_line_length": 31.719999313354492, "blob_id": "6af23f94a37eaa5ffbc1f4aa553f311d52ce42a8", "content_id": "95ffa6c3c2c4cc7f11f1563070f1402c55dc7a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3271, "license_type": "no_license", "max_line_length": 96, "num_lines": 100, "path": "/lib/iplot.py", "repo_name": "thatCarlyleGuy/bottlerocket", "src_encoding": "UTF-8", "text": "# Datatypes\nimport pandas as pd\n# Plotting\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\n\nclass IPlot(object):\n def __init__(self, pandas_df, vertical_spacing=0.01, shared_xaxes=True, cols = 1, **kwargs):\n self.df = pandas_df\n self.x_axis = self.df.axes[0].date\n self.fig = make_subplots(\n vertical_spacing=vertical_spacing,\n shared_xaxes=shared_xaxes,\n cols = cols,\n **kwargs\n )\n \n def add_simple_plot(self, go_plot, df_key, title = None, row = 3, col = 2, **kwargs):\n if title is None:\n title = df_key\n \n self.fig.append_trace(go_plot(\n x = self.x_axis,\n y = self.df[df_key],\n name = title,\n **kwargs\n ), row = row, col = col)\n \n return self \n \n def add_ohcl(self, name = 'OHCL', row = 1, col = 1):\n self.fig.append_trace(go.Candlestick(\n x = self.x_axis,\n open = self.df['open'],\n high = self.df['high'],\n low = self.df['low'],\n close = self.df['close'],\n name = name\n ), row = row, col = col)\n \n return self\n \n def add_volume(self, name = 'Volume', row = 2, col = 1):\n return self.add_simple_plot(go.Bar, 'volume', name, row, col)\n \n def add_scatter(self, df_key, title = None, row = 3, col = 1, **kwargs):\n return self.add_simple_plot(go.Scatter, df_key, title, row, col, **kwargs)\n \n def add_lines(self, lines):\n default_args = {'row': 3, 'col': 1}\n with_default_args = lambda args: {**default_args, **args}\n \n for line_args in lines:\n self.add_scatter(**with_default_args(line_args))\n 
\n return self\n \n def add_histogram(self, df_key, title = None, row = 3, col = 1, **kwargs):\n return self.add_simple_plot(go.Bar, df_key, title, row, col, **kwargs)\n \n def add_fill_between_lines(self, from_line, to_line, fill_color, row = 1, col = 1):\n from_args = from_line['args'] if 'args' in from_line else {} \n to_args = to_line['args'] if 'args' in to_line else {} \n \n self.fig.append_trace(go.Scatter(\n x = self.x_axis,\n y = pd.concat([\n self.df[from_line['df_key']],\n self.df[to_line['df_key']]\n ]),\n name = from_line['name'],\n fill = None,\n mode = 'lines',\n **from_args\n ), row = row, col = col)\n\n self.fig.append_trace(go.Scatter(\n x = self.x_axis,\n y = pd.concat([\n self.df[to_line['df_key']],\n self.df[to_line['df_key']]\n ]),\n name = to_line['name'],\n fill = fill_color,\n mode = 'lines',\n **to_args\n ), row = row, col = col)\n \n return self\n \n def show_plot(self, title = '', height = 900, xaxis_rangeslider_visible = False, **kwargs):\n self.fig.update_layout(\n height = height,\n xaxis_rangeslider_visible = xaxis_rangeslider_visible,\n title_text = title,\n **kwargs\n )\n self.fig.show()\n \n return self" }, { "alpha_fraction": 0.7230392098426819, "alphanum_fraction": 0.7303921580314636, "avg_line_length": 18.380952835083008, "blob_id": "bcfec38854862fbcf2fe66c0ab7486a53e712911", "content_id": "11aaa3f1c062114ce39c1747e6843c9d10560df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 408, "license_type": "no_license", "max_line_length": 81, "num_lines": 21, "path": "/Readme.md", "repo_name": "thatCarlyleGuy/bottlerocket", "src_encoding": "UTF-8", "text": "\n\n## Python Environment\n### Create env\n`conda create --name bottlerocketv1 python=3.9 -y`\n\n### Write env to file\n`conda env export > environment.yml`\n\n`conda activate bottlerocket`\n`conda deactivate`\n\n`conda info --envs`\n\n### Run env\n`conda activate bottlerocket`\n\n`jupyter lab`\n\n## Running the 
notebooks\n\nCreate a file called `secrets.py` in the `lib` folder that exports the following:\n- twelvedata_api_key" } ]
2
romarpatindol/xero_get_private_applications
https://github.com/romarpatindol/xero_get_private_applications
e0aa96748611a75ec37b173d19eb11d6e9197a04
2478bab72efaf2756bb727d8ed64ffcfe23dfd24
a16e93821fd74b6fd482c687a1445ec6d3e41db1
refs/heads/master
2020-03-11T23:43:47.728909
2018-04-20T13:21:21
2018-04-20T13:21:21
130,330,205
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6994622349739075, "alphanum_fraction": 0.7157936692237854, "avg_line_length": 30.578617095947266, "blob_id": "79a5210efebff4a06f73c492a75145eeda9bec48", "content_id": "ce9c17295f03624a587be64d2384ef36ae60d15f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5021, "license_type": "no_license", "max_line_length": 330, "num_lines": 159, "path": "/README.md", "repo_name": "romarpatindol/xero_get_private_applications", "src_encoding": "UTF-8", "text": "# Xero Accounting GET API for Private Applications\n\nA [Supercode](http://gosupercode.com) function that connects to Xero Accounting Get API for Private Applications\n\n## Server Usage\n\nGet the Supercode Python SDK: https://git.io/vxTjp\n\n```\nimport supercode\nimport pprint\n\ncredentials_json_data = {}\npayload={}\n\nresponse = supercode.call(\n \"super-code-function\",\n \"your-supercode-api-key\",\n consumer_key,\n rsa_key,\n query\n)\n\npprint(response)\n```\n\nPyXero\n======\n\nPyXero is a Python API for accessing the REST API provided by the [Xero](https://developer.xero.com)\naccounting tool. It allows access to both Public, Private and Partner applications.\n\n### Private Applications\n\nIf using a Private application, you will need to install `PyCrypto`, a pure\nPython cryptographic module. You'll also need to generate an signed RSA\ncertificate, and submit that certificate as part of registering your\napplication with Xero. See the [Xero Developer documentation](https://developer.xero.com/) for more\ndetails.\n\nWhen you [register your private application with Xero](https://developer.xero.com/documentation/auth-and-limits/private-applications/), you'll be given a\n**Consumer Key**. 
You'll also be given a **Consumer secret** - this can be\nignored.\n\nUsing the Private credentials is much simpler than the Public credentials,\nbecause there's no verification step -- verification is managed using RSA\nsigned API requests:\n\n[Follow these steps](https://developer.xero.com/documentation/api-guides/create-publicprivate-key/) to generate a public/private key pair to sign your requests. You'll upload your public key when you create your Xero Private app at https://app.xero.com. You'll use the private key (aka RSA key) to generate your oAuth signature.\n\nThe RSA key is a multi-line string that will look something like::\n\n -----BEGIN RSA PRIVATE KEY-----\n MIICXgIBAAKBgQDWJbmxJjQLGM76sZkk2EhsdpV0Gxtrhzh/wiNBGffa5JHV/Ex4\n ....\n mtXGQjKqsOpuCw7HwgnRQUWKYbaJ3a+yTCFjVwa9keQhDQ==\n -----END RSA PRIVATE KEY-----\n\nYou can get this string by either reading the contents of your private key\nfile into a variable, or storing the key value as a constant. If you choose to\nstore the key value as a constant, remember two things:\n\n* Make sure there is no leading space before\n the ``-----BEGIN PRIVATE KEY-----`` portion of the string.\n\n\n## Query\n\n```python\n# Retrieve all contact objects\n>>> query = \"contacts.all()\"\n[{...contact info...}, {...contact info...}, {...contact info...}, ...]\n\n# Retrieve a specific contact object\n>>> query = \"contacts.get('b2b5333a-2546-4975-891f-d71a8a640d23')\"\n{...contact info...}\n\n# Retrive all contacts updated since 1 Jan 2013\n>>> query = \"contacts.filter(since=datetime(2013, 1, 1))\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n\n# Retrive all contacts whose name is 'John Smith'\n>>> query = \"contacts.filter(Name='John Smith')\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n\n# Retrive all contacts whose name starts with 'John'\n>>> query = \"contacts.filter(Name__startswith='John')\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n\n# 
Retrive all contacts whose name ends with 'Smith'\n>>> query = \"contacts.filter(Name__endswith='Smith')\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n\n# Retrive all contacts whose name starts with 'John' and ends with 'Smith'\n>>> query = \"contacts.filter(Name__startswith='John', Name__endswith='Smith')\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n\n# Retrive all contacts whose name contains 'mit'\n>>> query = \"contacts.filter(Name__contains='mit')\"\n[{...contact info...}, {...contact info...}, {...contact info...}]\n```\n\nComplex filters can be constructed, for example retrieving invoices for a contact:\n\n```python\n>>> query = \"invoices.filter(Contact_ContactID='83ad77d8-48a7-4f77-9146-e6933b7fb63b')\"\n```\n\nFilters which aren't supported by this API can also be constructed using 'raw' mode like this:\n```python\n>>> query = \"invoices.filter(raw='AmountDue > 0')\"\n```\n\nBe careful when dealing with large amounts of data, the Xero API will take an\nincreasingly long time to respond, or an error will be returned. If a query might\nreturn more than 100 results, you should make use of the ``page`` parameter::\n\n```python\n# Grab 100 invoices created after 01-01-2013\n>>> query = \"invoices.filter(since=datetime(2013, 1, 1), page=1)\"\n```\n\nYou can also order the results to be returned::\n\n```python\n# Grab contacts ordered by EmailAddress\n>>> query = \"contacts.filter(order='EmailAddress DESC')\"\n```\n\nThis same API pattern exists for the following API objects:\n\n* Accounts\n* Attachments\n* BankTransactions\n* BankTransfers\n* BrandingThemes\n* ContactGroups\n* Contacts\n* CreditNotes\n* Currencies\n* Employees\n* ExpenseClaims\n* Invoices\n* Items\n* Journals\n* ManualJournals\n* Organisation\n* Overpayments\n* Payments\n* Prepayments\n* Purchase Orders\n* Receipts\n* RepeatingInvoices\n* Reports\n* TaxRates\n* TrackingCategories\n* Users\n\n**Note:** Supercode has not been launched yet. 
This is for internal testing only.\n" }, { "alpha_fraction": 0.6170839667320251, "alphanum_fraction": 0.6170839667320251, "avg_line_length": 23.285715103149414, "blob_id": "7c22456eccddc5f2dcb8512139a1deb269253f7e", "content_id": "2b169db0e91bd52e621e9adaad75e9eac56b662d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 63, "num_lines": 28, "path": "/main.py", "repo_name": "romarpatindol/xero_get_private_applications", "src_encoding": "UTF-8", "text": "import json\nimport datetime\nfrom xero import Xero\nfrom xero.auth import PrivateCredentials\n\n\ndef myconverter(o):\n if isinstance(o, datetime.datetime):\n return o.__str__()\n\ndef main(consumer_key, rsa_key, query):\n try:\n credentials = PrivateCredentials(consumer_key, rsa_key)\n xero = Xero(credentials)\n xero_data = eval(\"xero.\" + query)\n\n response = {\n \"data\": xero_data\n }\n \n except Exception as e:\n response = {\n \"error\": e\n }\n \n # Convert datetime to str to make the data serializable\n str_response = json.dumps(response, default=myconverter)\n return json.loads(str_response)" } ]
2
Celestares/CrPYtoLearn
https://github.com/Celestares/CrPYtoLearn
e8188526af541db77b70b21e6492adc52bcf3bf5
7e3136c27f6e105f9392eaedba3b1409d6c184c1
19fe2a5a3ba6598d7d1624ef48173fcc1e34d331
refs/heads/main
2023-02-11T13:18:06.511398
2021-01-08T12:21:43
2021-01-08T12:21:43
327,708,242
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.473175972700119, "alphanum_fraction": 0.4967811107635498, "avg_line_length": 30.13793182373047, "blob_id": "34a6ca8dfa9701c6f8eaedbd25bfce38e126c833", "content_id": "ff2bf39e87d98d991df83580622dd5f369f5daea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1864, "license_type": "no_license", "max_line_length": 98, "num_lines": 58, "path": "/CryptoPackage/shift_cipher.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "base64_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=' # Q1, Q2, Q3\r\nalphabets = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\r\ndef encrypt(key, plaintext_utf8, chars):\r\n ciphertext_utf8 = \"\"\r\n\r\n if chars == \"alpha\":\r\n for char in plaintext_utf8:\r\n\r\n if char.lower() in alphabets:\r\n pos = alphabets.index(char.lower())\r\n pos = (pos + key) % len(alphabets)\r\n if char.isupper():\r\n ciphertext_utf8 += alphabets[pos].upper()\r\n else:\r\n ciphertext_utf8 += alphabets[pos]\r\n else:\r\n ciphertext_utf8 += char\r\n else:\r\n for char in plaintext_utf8:\r\n\r\n if char in base64_chars:\r\n pos = base64_chars.index(char)\r\n pos = (pos + key) % len(base64_chars)\r\n ciphertext_utf8 += base64_chars[pos]\r\n else:\r\n ciphertext_utf8 += char\r\n\r\n return ciphertext_utf8\r\n\r\n\r\ndef decrypt(key, ciphertext_utf8, chars):\r\n decryptedtext_utf = \"\"\r\n\r\n if chars == \"alpha\":\r\n for char in ciphertext_utf8:\r\n\r\n if char.lower() in alphabets:\r\n pos = alphabets.index(char.lower())\r\n pos = (pos - key) % len(alphabets)\r\n if char.isupper():\r\n decryptedtext_utf += alphabets[pos].upper()\r\n else:\r\n decryptedtext_utf += alphabets[pos]\r\n else:\r\n decryptedtext_utf += char\r\n else:\r\n for char in ciphertext_utf8:\r\n\r\n if char in base64_chars:\r\n pos = base64_chars.index(char)\r\n pos = (pos - key) % len(base64_chars)\r\n decryptedtext_utf += base64_chars[pos]\r\n else:\r\n decryptedtext_utf 
+= char\r\n\r\n return decryptedtext_utf\r\n" }, { "alpha_fraction": 0.7245222926139832, "alphanum_fraction": 0.7245222926139832, "avg_line_length": 27.904762268066406, "blob_id": "b4401e9157196693def048fb1b80c2a09abba5eb", "content_id": "9bbfecdaea9f0722a30dcb713c7a19dba90d1dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 47, "num_lines": 21, "path": "/CryptoPackage/_test_all.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import shift_cipher_test\r\nfrom CryptoPackage import monoalpha_cipher_test\r\nfrom CryptoPackage import railfence_cipher_test\r\nfrom CryptoPackage import column_cipher_test\r\nfrom CryptoPackage import vernam_cipher_test\r\nfrom CryptoPackage import dhkey_exchange_test\r\nfrom CryptoPackage import aes_test\r\n\r\n\r\ndef run_test():\r\n shift_cipher_test.run_test()\r\n monoalpha_cipher_test.run_test()\r\n railfence_cipher_test.run_test()\r\n column_cipher_test.run_test()\r\n vernam_cipher_test.run_test()\r\n dhkey_exchange_test.run_test()\r\n aes_test.run_test()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.5737226009368896, "alphanum_fraction": 0.5912408828735352, "avg_line_length": 29.136363983154297, "blob_id": "6588d60b862cb37a13771ea0c724db468bb32aca", "content_id": "b569459ec456c66c46e17af2a79399692d1a91c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 79, "num_lines": 22, "path": "/CryptoPackage/railfence_cipher_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import railfence_cipher\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nRail Fence cipher test\")\r\n print(\"---------------------------------------------\")\r\n\r\n row1 = 3\r\n row2 = 7\r\n string = \"The quick 
brown fox jumps over the lazy dog.\"\r\n\r\n ciphertext = railfence_cipher.encrypt(row1, string)\r\n print(f\"Ciphertext (3 rows) : {ciphertext}\")\r\n print(f\"Plaintext (3 rows) : {railfence_cipher.decrypt(row1, ciphertext)}\")\r\n ciphertext = railfence_cipher.encrypt(row2, string)\r\n print(f\"Ciphertext (7 rows) : {ciphertext}\")\r\n print(f\"Plaintext (7 rows) : {railfence_cipher.decrypt(row2, ciphertext)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.7694204449653625, "alphanum_fraction": 0.7718865871429443, "avg_line_length": 26.03333282470703, "blob_id": "6c7388d25c911713dd6eb52383d80abcb0437ad6", "content_id": "38646bd38fc25c1107425cea15f7dcf3301a4425", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 811, "license_type": "no_license", "max_line_length": 232, "num_lines": 30, "path": "/README.md", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "# CrPYtoLearn\nMy applied cryptography project\n\n## Created using\n- Python\n- Flask / Jinja\n- HTML, CSS, JavaScript\n- PyCryptodome\n\n## About Project\nThis was an individual project where we are tasked to create an application and implement many various common cryptographic techniques and algorithms that aims to teach the user on how they work and allow the user to play with them.\n\n## Project Features\n#### Crytographic techniques\n- Shift cipher\n- Mono-alphabet cipher\n- Rail fence technique\n- Simple columnar transposition technique\n- Vernam cipher\n- Diffie-Hellman key exchange\n\n#### Symmetric Algorithms\n- Electronic Code Book (ECB)\n- Cipher Block Chaining (CBC)\n- Cipher Feedback (CFB)\n- Output Feedback (OFB)\n\n#### Other features\n- 5 chapters about data security\n- Quiz system that test about the 5 chapters\n" }, { "alpha_fraction": 0.4974747598171234, "alphanum_fraction": 0.5113636255264282, "avg_line_length": 25.310344696044922, "blob_id": 
"fd3e0fe051e1d8179251728b090715176e98e390", "content_id": "3bdfbd93444cd0a8de43b0ddb7c10e9a91fb3943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 81, "num_lines": 29, "path": "/CryptoPackage/dhkey_exchange_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import dhkey_exchange\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nDiffie-Hellman Key Exchange test\")\r\n print(\"---------------------------------------------\")\r\n\r\n n = 5\r\n g = 19\r\n not_prime = 20\r\n x = 515\r\n y = 286\r\n if dhkey_exchange.is_prime(n):\r\n print(f\"{n} is a prime number\")\r\n else:\r\n print(f\"{n} is not a prime number\")\r\n if dhkey_exchange.is_prime(not_prime):\r\n print(f\"{not_prime} is a prime number\")\r\n else:\r\n print(f\"{not_prime} is not a prime number\")\r\n\r\n if dhkey_exchange.is_prime(n) and dhkey_exchange.is_prime(g):\r\n key = dhkey_exchange.generate_key(n, g, x, y)\r\n print(f\"Symmetric key is {key} using n = {n}, g = {g}, x = {x}, y = {y}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.5757575631141663, "avg_line_length": 35.55172348022461, "blob_id": "7893af2a9e3f13b2aa86f1a97c88963418bd7c61", "content_id": "040b654db0f2d1f2d568e51ef165212a9933d93e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1089, "license_type": "no_license", "max_line_length": 89, "num_lines": 29, "path": "/CryptoPackage/monoalpha_cipher_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import monoalpha_cipher\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nRunning Mono-alphabetic cipher test\")\r\n print(\"---------------------------------------------\")\r\n\r\n string = \"The quick brown fox jumps over the lazy dog.\"\r\n 
fixed_key = \"zyxwvutsrqponmlkjihgfedcba\"\r\n random_key = monoalpha_cipher.generate_key()\r\n\r\n if monoalpha_cipher.validate_key(fixed_key):\r\n print(f\"{fixed_key} is valid.\")\r\n if monoalpha_cipher.validate_key(random_key):\r\n print(f\"{random_key} is valid.\")\r\n\r\n print(f\"Input text : {string}\")\r\n print(\"----------------------------------------------------------------------\")\r\n ciphertext = monoalpha_cipher.encrypt(fixed_key, string)\r\n print(f\"Ciphertext (Fixed key) : {ciphertext}\")\r\n print(f\"Plaintext (Fixed key) : {monoalpha_cipher.decrypt(fixed_key, ciphertext)}\")\r\n ciphertext = monoalpha_cipher.encrypt(random_key, string)\r\n print(f\"Ciphertext (Random key) : {ciphertext}\")\r\n print(f\"Plaintext (Random key) : {monoalpha_cipher.decrypt(random_key, ciphertext)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.5279069542884827, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 21.88888931274414, "blob_id": "625f795d21caccc4a14fdf76e75f599181d26526", "content_id": "fd1cf9c75a6a12ef7832d983119a39eb4e47a738", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/CryptoPackage/vernam_cipher_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import vernam_cipher\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nVernam cipher test\")\r\n print(\"---------------------------------------------\")\r\n\r\n key = \"GOOD KEY 123\"\r\n string = \"Good day sir\"\r\n\r\n ciphertext = vernam_cipher.encrypt(key, string)\r\n print(f\"Ciphertext : {ciphertext}\")\r\n print(f\"Plaintext : {vernam_cipher.decrypt(key, ciphertext)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.576288640499115, "alphanum_fraction": 0.5804123878479004, "avg_line_length": 33.925926208496094, 
"blob_id": "4e3d25234c1b5e7aaac49685cd052f2798da65ed", "content_id": "8f9dbdbf8ec5a4d71d0be3842ec70ec9d6f5ecb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 970, "license_type": "no_license", "max_line_length": 77, "num_lines": 27, "path": "/CryptoPackage/aes_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import aes\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nAES test\")\r\n print(\"---------------------------------------------\")\r\n\r\n key_value = aes.get_random_key(192)\r\n string = \"Testing\".encode(\"utf-8\")\r\n\r\n etext = aes.encrypt_ecb(key_value, string)\r\n print(f\"Ciphertext (ECB) : {etext}\")\r\n print(f\"Plaintext (ECB) : {aes.decrypt_ecb(key_value, etext)}\")\r\n iv_value, etext = aes.encrypt_cbc(key_value, string)\r\n print(f\"Ciphertext (CBC) : {etext}\")\r\n print(f\"Plaintext (CBC) : {aes.decrypt_cbc(key_value, iv_value, etext)}\")\r\n iv_value, etext = aes.encrypt_cfb(key_value, string)\r\n print(f\"Ciphertext (CFB) : {etext}\")\r\n print(f\"Plaintext (CFB) : {aes.decrypt_cfb(key_value, iv_value, etext)}\")\r\n iv_value, etext = aes.encrypt_ofb(key_value, string)\r\n print(f\"Ciphertext (OFB) : {etext}\")\r\n print(f\"Plaintext (OFB) : {aes.decrypt_ofb(key_value, iv_value, etext)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.6575481295585632, "alphanum_fraction": 0.66497802734375, "avg_line_length": 34.55555725097656, "blob_id": "2c4898497e6b7b71ab072b554c2c0a1b52b99c4d", "content_id": "05269bde6d11fa59db3d812678c28927883e2380", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2961, "license_type": "no_license", "max_line_length": 116, "num_lines": 81, "path": "/CryptoPackage/aes.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from Crypto.Cipher import AES\r\nfrom Crypto.Random import 
get_random_bytes\r\nfrom Crypto.Util.Padding import pad, unpad\r\n\r\n\r\ndef get_random_key(bit_size): # 128, 192, 256\r\n key = get_random_bytes(int(bit_size / 8))\r\n return key\r\n\r\n\r\ndef convert_to_bin(string): # Convert string to binary literally and exactly 'hello' -> b'hello'\r\n # Because it uses eval(), let say the string is \"\\xqw\\m2f\\'eqw\"\r\n # and if run the code eval(\"b'%s'\" % string), you can say that sort of an injection attack have happened where\r\n # the code would become eval(\"b'\\xqw\\m2f\\'eqw'\"), and as you can see when we eval this it would give us an error\r\n # because it evaluated the byte string b'\\xqw\\m2f\\' and the remaining eqw' creates a syntax error\r\n # you can interpret it as bytes = b'\\xqw\\m2f\\'eqw'\r\n # which if u try to assign that to the variable \"bytes\" it would create a syntax error\r\n # try testing it out by running the this python file\r\n try:\r\n return eval(\"b\\\"%s\\\"\" % string)\r\n except:\r\n return eval(\"b\\'%s\\'\" % string)\r\n\r\n\r\ndef convert_to_str(binary): # Convert binary to string literally and exactly b'hello' -> 'hello'\r\n return f\"{binary}\"[2: -1]\r\n\r\n\r\ndef encrypt_ecb(key, plaintext):\r\n cipher_key = AES.new(key, AES.MODE_ECB)\r\n ciphertext = cipher_key.encrypt(pad(plaintext, AES.block_size))\r\n return ciphertext\r\n\r\n\r\ndef decrypt_ecb(key, ciphertext):\r\n cipher_key = AES.new(key, AES.MODE_ECB)\r\n plaintext = unpad(cipher_key.decrypt(ciphertext), AES.block_size)\r\n return plaintext\r\n\r\n\r\ndef encrypt_cbc(key, plaintext):\r\n cipher_key = AES.new(key, AES.MODE_CBC)\r\n ciphertext = cipher_key.encrypt(pad(plaintext, AES.block_size))\r\n return cipher_key.iv, ciphertext\r\n\r\n\r\ndef decrypt_cbc(key, iv, ciphertext):\r\n cipher_key = AES.new(key, AES.MODE_CBC, iv)\r\n plaintext = unpad(cipher_key.decrypt(ciphertext), AES.block_size)\r\n return plaintext\r\n\r\n\r\ndef encrypt_cfb(key, plaintext):\r\n cipher_key = AES.new(key, AES.MODE_CFB)\r\n ciphertext 
= cipher_key.encrypt(pad(plaintext, AES.block_size))\r\n return cipher_key.iv, ciphertext\r\n\r\n\r\ndef decrypt_cfb(key, iv, ciphertext):\r\n cipher_key = AES.new(key, AES.MODE_CFB, iv)\r\n plaintext = unpad(cipher_key.decrypt(ciphertext), AES.block_size)\r\n return plaintext\r\n\r\n\r\ndef encrypt_ofb(key, plaintext):\r\n cipher_key = AES.new(key, AES.MODE_OFB)\r\n ciphertext = cipher_key.encrypt(pad(plaintext, AES.block_size))\r\n return cipher_key.iv, ciphertext\r\n\r\n\r\ndef decrypt_ofb(key, iv, ciphertext):\r\n cipher_key = AES.new(key, AES.MODE_OFB, iv)\r\n plaintext = unpad(cipher_key.decrypt(ciphertext), AES.block_size)\r\n return plaintext\r\n\r\n\r\nif __name__ == \"__main__\": # Running this .py file will create an error, is to demostrate convert to binary error\r\n test_string = r\"\\xqw\\m2f\\'eqw\" # r\"\" means raw string\r\n byte = eval(\"b'%s'\" % test_string)\r\n # byte = b'\\xqw\\m2f\\'eqw'\r\n # line 81 creates same error as line 80\r\n" }, { "alpha_fraction": 0.5399293303489685, "alphanum_fraction": 0.5399293303489685, "avg_line_length": 23.727272033691406, "blob_id": "b41d924bc2de63e0c5612e6c05592c703d6b8250", "content_id": "52a4d152d924ba86e835a25fb84669c21e2ef95a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1415, "license_type": "no_license", "max_line_length": 57, "num_lines": 55, "path": "/CryptoPackage/monoalpha_cipher.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from random import shuffle\r\n\r\n\r\ndef generate_key():\r\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n alpha_list = [s for s in alphabets]\r\n shuffle(alpha_list)\r\n return \"\".join(alpha_list)\r\n\r\n\r\ndef validate_key(key):\r\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n alpha_list = [s for s in alphabets]\r\n for alpha in key:\r\n if alpha.upper() in alpha_list:\r\n alpha_list.remove(alpha.upper())\r\n else:\r\n return False\r\n if alpha_list: # Make sure that list is 
empty\r\n return False\r\n return True\r\n\r\n\r\ndef encrypt(key, plaintext):\r\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n output = \"\"\r\n\r\n for char in plaintext:\r\n if char.upper() in alphabets:\r\n cipher_index = alphabets.index(char.upper())\r\n if char.isupper():\r\n output += key[cipher_index].upper()\r\n else:\r\n output += key[cipher_index].lower()\r\n else:\r\n output += char\r\n\r\n return output\r\n\r\n\r\ndef decrypt(key, ciphertext):\r\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n output = \"\"\r\n\r\n for char in ciphertext:\r\n if char.upper() in alphabets:\r\n plain_index = key.upper().index(char.upper())\r\n if char.isupper():\r\n output += alphabets[plain_index].upper()\r\n else:\r\n output += alphabets[plain_index].lower()\r\n else:\r\n output += char\r\n\r\n return output\r\n" }, { "alpha_fraction": 0.57852703332901, "alphanum_fraction": 0.590062141418457, "avg_line_length": 31.147058486938477, "blob_id": "46fb7c87e41b1832fd21c4f279535364a1a37587", "content_id": "f21d69b4db781164f29a78ab6a79a2d8474b2eb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1127, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/CryptoPackage/shift_cipher_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import shift_cipher\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nShift cipher test\")\r\n print(\"---------------------------------------------\")\r\n\r\n key = 3\r\n\r\n plaintext = \"HELLO\" # Q1, Q2\r\n ciphertext = shift_cipher.encrypt(key, plaintext, \"alpha\")\r\n decryptedtext = shift_cipher.decrypt(key, ciphertext, \"alpha\")\r\n print(\"plaintext: \" + plaintext)\r\n print(\"ciphertext: \" + ciphertext)\r\n print(\"decryptedtext: \" + decryptedtext + \"\\n\")\r\n\r\n plaintext = \"Hello!\" # Q1, Q2\r\n ciphertext = shift_cipher.encrypt(key, plaintext, \"alpha\")\r\n decryptedtext = 
shift_cipher.decrypt(key, ciphertext, \"alpha\")\r\n print(\"plaintext: \" + plaintext)\r\n print(\"ciphertext: \" + ciphertext)\r\n print(\"decryptedtext: \" + decryptedtext + \"\\n\")\r\n\r\n plaintext = \"Hello123+/=\" # Q3\r\n ciphertext = shift_cipher.encrypt(key, plaintext, \"b64\")\r\n decryptedtext = shift_cipher.decrypt(key, ciphertext, \"b64\")\r\n print(\"plaintext: \" + plaintext)\r\n print(\"ciphertext: \" + ciphertext)\r\n print(\"decryptedtext: \" + decryptedtext + \"\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" }, { "alpha_fraction": 0.5402131676673889, "alphanum_fraction": 0.5494186282157898, "avg_line_length": 29.272727966308594, "blob_id": "dcb6da52139c86a018d324207cdbca6fe455e216", "content_id": "c8e0e1b29e1b9026400ce8af8f3cfbec0cdf0ac0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2064, "license_type": "no_license", "max_line_length": 96, "num_lines": 66, "path": "/CryptoPackage/vernam_cipher.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n\r\n\r\ndef verify_key(key, text):\r\n if len(key) != len(text):\r\n return False\r\n return True\r\n\r\n\r\ndef encrypt(key, plaintext):\r\n text_num_list = []\r\n key_num_list = []\r\n output = \"\"\r\n\r\n for char in plaintext:\r\n if char.upper() in alphabets:\r\n text_num_list.append(alphabets.index(char.upper()))\r\n else:\r\n text_num_list.append(-1) # For non-alphabetic characters\r\n\r\n for char in key:\r\n if char.upper() in alphabets:\r\n key_num_list.append(alphabets.index(char.upper()))\r\n else:\r\n key_num_list.append(-1) # For non-alphabetic characters\r\n\r\n for index, pair in enumerate(zip(text_num_list, key_num_list)):\r\n if -1 in pair: # Non-alphabetic characters will be ignored (follow plaintext character)\r\n output += plaintext[index]\r\n else:\r\n output_num = sum(pair) % 26\r\n output += alphabets[output_num]\r\n\r\n return 
output.upper()\r\n\r\n\r\ndef decrypt(key, ciphertext):\r\n text_num_list = []\r\n key_num_list = []\r\n output = \"\"\r\n\r\n for char in ciphertext:\r\n if char.upper() in alphabets:\r\n text_num_list.append(alphabets.index(char.upper()))\r\n else:\r\n text_num_list.append(-1) # For non-alphabetic characters\r\n\r\n for char in key:\r\n if char.upper() in alphabets:\r\n key_num_list.append(alphabets.index(char.upper()))\r\n else:\r\n key_num_list.append(-1) # For non-alphabetic characters\r\n\r\n for index, pair in enumerate(zip(text_num_list, key_num_list)):\r\n if -1 in pair: # Non-alphabetic characters will be ignored (follow plaintext character)\r\n output += ciphertext[index]\r\n else:\r\n if pair[0] < pair[1]:\r\n output_num = pair[0] + 26 - pair[1]\r\n # elif pair[0] == pair[1]:\r\n # output_num = pair[0]\r\n else:\r\n output_num = pair[0] - pair[1]\r\n output += alphabets[output_num]\r\n\r\n return output.upper()\r\n" }, { "alpha_fraction": 0.5428170561790466, "alphanum_fraction": 0.5531118512153625, "avg_line_length": 25.05063247680664, "blob_id": "a3a5a0946f8712e43bc3d406c76d54b387bd01d7", "content_id": "08eed53e77e25c7b27bbc8c90bad11c8fcf7817e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2137, "license_type": "no_license", "max_line_length": 74, "num_lines": 79, "path": "/CryptoPackage/railfence_cipher.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "def validate_row(row):\r\n if row < 2:\r\n return False\r\n return True\r\n\r\n\r\ndef encrypt(row, plaintext):\r\n matrix = []\r\n for i in range(row):\r\n matrix.append([])\r\n\r\n reverse = False\r\n row_index = 0\r\n row_boundary = (0, row - 1)\r\n output = \"\"\r\n\r\n for char in plaintext:\r\n matrix[row_index].append(char)\r\n if reverse:\r\n row_index -= 1\r\n else:\r\n row_index += 1\r\n if row_index in row_boundary:\r\n reverse = not reverse # Swap between True and False state\r\n\r\n for line in 
matrix:\r\n output += \"\".join(line)\r\n\r\n return output\r\n\r\n\r\ndef decrypt(row, ciphertext):\r\n output = \"\"\r\n\r\n # Determine and calculate number of characters for each row (segments)\r\n length = len(ciphertext)\r\n segment_num = 2 * (row - 1)\r\n base_segment_len = length // segment_num\r\n remains = length - (base_segment_len * segment_num)\r\n\r\n segment_list = [base_segment_len, base_segment_len]\r\n for i in range(row - 2):\r\n segment_list.insert(1, base_segment_len * 2)\r\n\r\n row_index = 0\r\n reverse = False\r\n row_boundary = (0, row - 1)\r\n for i in range(remains):\r\n segment_list[row_index] += 1\r\n # Counter that counts up and down based on boundary\r\n if reverse:\r\n row_index -= 1\r\n else:\r\n row_index += 1\r\n if row_index in row_boundary:\r\n reverse = not reverse # Swap between True and False state\r\n\r\n # Formatting matrix based on segment list\r\n start = 0\r\n matrix = []\r\n for segment in segment_list:\r\n matrix.append(ciphertext[start:start + segment])\r\n start += segment\r\n for index, string in enumerate(matrix):\r\n matrix[index] = [char for char in string]\r\n\r\n # Reading matrix\r\n row_index = 0\r\n reverse = False\r\n while len(output) != length:\r\n output += matrix[row_index].pop(0)\r\n if reverse:\r\n row_index -= 1\r\n else:\r\n row_index += 1\r\n if row_index in row_boundary:\r\n reverse = not reverse # Swap between True and False state\r\n\r\n return output\r\n" }, { "alpha_fraction": 0.32732734084129333, "alphanum_fraction": 0.36036035418510437, "avg_line_length": 16.38888931274414, "blob_id": "f7d9a64568c2510b04dfb8e81d91ff3fed15f467", "content_id": "d4f4b1510e9612667bfe7340c0bd0787ecdc27a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/CryptoPackage/dhkey_exchange.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "def 
is_prime(n):\r\n if n == 2:\r\n return True\r\n for i in range(2, int(n ** 0.5) + 1):\r\n if n % i == 0:\r\n return False\r\n return True\r\n\r\n\r\ndef generate_key(n, g, x, y):\r\n a = (g ** x) % n\r\n b = (g ** y) % n\r\n\r\n k1 = (b ** x) % n\r\n k2 = (a ** y) % n\r\n\r\n if k1 == k2:\r\n return k1\r\n\r\n" }, { "alpha_fraction": 0.5026316046714783, "alphanum_fraction": 0.5043859481811523, "avg_line_length": 22.782608032226562, "blob_id": "32d8aa186aceea1e416808b337544edf48d655ef", "content_id": "c6b11ce3cdd40ef24d966a71afe7ee50831d9d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2280, "license_type": "no_license", "max_line_length": 71, "num_lines": 92, "path": "/CryptoPackage/column_cipher.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "def verify_col(col):\r\n if col < 2:\r\n return False\r\n return True\r\n\r\n\r\ndef verify_inputs(col, key):\r\n if key == \"\":\r\n if col < 2:\r\n return False\r\n else:\r\n if col < 2:\r\n return False\r\n elif len(key) != col:\r\n return False\r\n\r\n if not key.isalpha():\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef encrypt(col, key, plaintext):\r\n matrix = []\r\n sublist = []\r\n suboutput = \"\"\r\n output = []\r\n\r\n for char in plaintext:\r\n sublist.append(char)\r\n if len(sublist) == col:\r\n matrix.append(sublist)\r\n sublist = []\r\n if sublist: # Check if there is remainings\r\n sublist = sublist + [\" \"] * (col - len(sublist))\r\n matrix.append(sublist)\r\n\r\n for c in range(col):\r\n for r in range(len(matrix)):\r\n suboutput += matrix[r][c]\r\n output.append(suboutput)\r\n suboutput = \"\"\r\n\r\n if not key:\r\n return \"\".join(output)\r\n else:\r\n temp_output = output\r\n output = []\r\n unsorted_key = [char for char in key.upper()]\r\n sorted_key = sorted(unsorted_key)\r\n for letter in sorted_key:\r\n column = unsorted_key.index(letter)\r\n output.append(temp_output[column])\r\n unsorted_key[column] 
= \"_\"\r\n return \"\".join(output)\r\n\r\n\r\ndef decrypt(col, key, ciphertext):\r\n matrix = []\r\n row = len(ciphertext) // col\r\n start = 0\r\n output = []\r\n\r\n for r in range(col):\r\n matrix.append([char for char in ciphertext[start:start + row]])\r\n start += row\r\n\r\n if key: # Sorting the rows in order based on key\r\n temp_matrix = matrix\r\n matrix = []\r\n unsorted_key = [char for char in key.upper()]\r\n sorted_key = sorted(unsorted_key)\r\n for letter in unsorted_key:\r\n row_index = sorted_key.index(letter)\r\n matrix.append(temp_matrix[row_index])\r\n sorted_key[row_index] = \"_\"\r\n\r\n for r in range(row):\r\n suboutput = \"\"\r\n for c in range(col):\r\n suboutput += matrix[c][r]\r\n output.append(suboutput)\r\n return \"\".join(output).rstrip()\r\n\r\n\r\n# T h e q u i\r\n# c k b r o w\r\n# n f o x j\r\n# u m p s o v\r\n# e r t h e\r\n# l a z y d o\r\n# g .\r\n" }, { "alpha_fraction": 0.562960147857666, "alphanum_fraction": 0.5651592016220093, "avg_line_length": 34.56993103027344, "blob_id": "4733188e34bed396293965c70b88526cb97ef477", "content_id": "951aa3b46bb813d81b9fb96efeaaecad4aa4b4dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10459, "license_type": "no_license", "max_line_length": 144, "num_lines": 286, "path": "/cryptoapp.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request\r\nfrom CryptoPackage import shift_cipher, monoalpha_cipher, railfence_cipher, column_cipher, vernam_cipher, aes, dhkey_exchange\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected](\"/\") # Homepage\r\ndef home():\r\n return render_template(\"home.html\")\r\n\r\n\r\n#\r\n#\r\n#\r\n#\r\n\r\n\r\[email protected](\"/learn\") # Learn various security topics\r\ndef learn():\r\n return render_template(\"learn/learn.html\", title=\"Learning Materials\")\r\n\r\n\r\[email protected](\"/learn/need_security\")\r\ndef 
learn_security():\r\n return render_template(\"learn/learn_security.html\", title=\"Learn - Need for security\")\r\n\r\n\r\[email protected](\"/learn/trusted_systems\")\r\ndef learn_system():\r\n return render_template(\"learn/learn_system.html\", title=\"Learn - Trusted systems and reference monitor\")\r\n\r\n\r\[email protected](\"/learn/security_models\")\r\ndef learn_model():\r\n return render_template(\"learn/learn_model.html\", title=\"Learn - Security models\")\r\n\r\n\r\[email protected](\"/learn/security_management\")\r\ndef learn_management():\r\n return render_template(\"learn/learn_management.html\", title=\"Learn - Security management practices\")\r\n\r\n\r\[email protected](\"/learn/type_attacks\")\r\ndef learn_attack():\r\n return render_template(\"learn/learn_attack.html\", title=\"Learn - Types of attacks\")\r\n\r\n\r\[email protected](\"/learn/quiz\", methods=[\"GET\", \"POST\"])\r\ndef learn_quiz():\r\n answered = False\r\n correct_ans = [\"B\", [\"B\", \"C\", \"D\"], \"C\", \"effort\", \"B\"]\r\n if request.method == \"POST\":\r\n answered = True\r\n try:\r\n ans1 = request.form[\"q1\"].upper()\r\n except:\r\n ans1 = \"Unanswered\"\r\n try:\r\n ans2 = request.form.getlist(\"q2\")\r\n ans2 = [ans.upper() for ans in ans2]\r\n except:\r\n ans2 = \"Unanswered\"\r\n try:\r\n ans3 = request.form[\"q3\"].upper()\r\n except:\r\n ans3 = \"Unanswered\"\r\n try:\r\n ans4 = request.form[\"q4\"].lower()\r\n except:\r\n ans4 = \"Unanswered\"\r\n try:\r\n ans5 = request.form[\"q5\"].upper()\r\n except:\r\n ans5 = \"Unanswered\"\r\n given_ans = [ans1, ans2, ans3, ans4, ans5]\r\n result = list(zip(given_ans, correct_ans))\r\n return render_template(\"learn/learn_quiz.html\", title=\"Learn - Quiz\", answered=answered, result=result)\r\n\r\n return render_template(\"learn/learn_quiz.html\", title=\"Learn - Quiz\", answered=answered)\r\n\r\n\r\n#\r\n#\r\n#\r\n\r\n\r\[email protected](\"/cryptography\") # Cryptography techniques\r\ndef cryptography():\r\n return 
render_template(\"Crypto/cryptography.html\", title=\"Cryptography\")\r\n\r\n\r\[email protected](\"/cryptography/shiftcipher\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_shiftcipher():\r\n output = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n mode = request.form[\"mode\"]\r\n chars = request.form[\"characters\"]\r\n key = int(request.form[\"key\"])\r\n text = request.form[\"text\"]\r\n if mode == \"encrypt\":\r\n output = shift_cipher.encrypt(key, text, chars)\r\n elif mode == \"decrypt\":\r\n output = shift_cipher.decrypt(key, text, chars)\r\n except:\r\n output = \"An error have occured! Please refer to the user guide for what may have caused the error.\"\r\n\r\n return render_template(\"Crypto/cryptography_shiftcipher.html\", title=\"Crypto - Shift Cipher\", output=output)\r\n\r\n\r\[email protected](\"/cryptography/monoalphacipher\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_monoalphacipher():\r\n output = \"\"\r\n key = \"\"\r\n generated_key_used = False\r\n if request.method == \"POST\":\r\n try:\r\n mode = request.form[\"mode\"]\r\n key = request.form[\"key\"]\r\n if key:\r\n assert monoalpha_cipher.validate_key(key)\r\n else:\r\n assert mode == \"encrypt\"\r\n key = monoalpha_cipher.generate_key()\r\n generated_key_used = True\r\n text = request.form[\"text\"]\r\n if mode == \"encrypt\":\r\n output = monoalpha_cipher.encrypt(key, text)\r\n elif mode == \"decrypt\":\r\n print(\"decrypting\")\r\n print(text)\r\n print(key)\r\n output = monoalpha_cipher.decrypt(key, text)\r\n print(output)\r\n except:\r\n output = \"An error have occured! 
Please refer to the user guide for what may have caused the error.\"\r\n\r\n if generated_key_used:\r\n return render_template(\"Crypto/cryptography_monoalphacipher.html\", title=\"Crypto - Mono-alphabet Cipher\", output=output, output_key=key)\r\n else:\r\n return render_template(\"Crypto/cryptography_monoalphacipher.html\", title=\"Crypto - Mono-alphabet Cipher\", output=output)\r\n\r\n\r\[email protected](\"/cryptography/railfencetech\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_railfencetech():\r\n output = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n mode = request.form[\"mode\"]\r\n rows = int(request.form[\"row\"])\r\n text = request.form[\"text\"]\r\n if mode == \"encrypt\":\r\n output = railfence_cipher.encrypt(rows, text)\r\n elif mode == \"decrypt\":\r\n output = railfence_cipher.decrypt(rows, text)\r\n except:\r\n output = \"An error have occured! Please refer to the user guide for what may have caused the error.\"\r\n\r\n return render_template(\"Crypto/cryptography_railfencetech.html\", title=\"Crypto - Rail Fence Technique\", output=output)\r\n\r\n\r\[email protected](\"/cryptography/coltranstech\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_coltranstech():\r\n output = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n mode = request.form[\"mode\"]\r\n cols = int(request.form[\"columns\"])\r\n key = request.form[\"key\"]\r\n if key:\r\n assert column_cipher.verify_inputs(cols, key)\r\n text = request.form[\"text\"]\r\n if mode == \"encrypt\":\r\n output = column_cipher.encrypt(cols, key, text)\r\n elif mode == \"decrypt\":\r\n output = column_cipher.decrypt(cols, key, text)\r\n except:\r\n output = \"An error have occured! 
Please refer to the user guide for what may have caused the error.\"\r\n\r\n return render_template(\"Crypto/cryptography_coltranstech.html\", title=\"Crypto - Simple Columnar Transposition Technique\", output=output)\r\n\r\n\r\[email protected](\"/cryptography/vernamcipher\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_vernamcipher():\r\n output = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n mode = request.form[\"mode\"]\r\n key = request.form[\"key\"]\r\n text = request.form[\"text\"]\r\n assert vernam_cipher.verify_key(key, text)\r\n if mode == \"encrypt\":\r\n output = vernam_cipher.encrypt(key, text)\r\n elif mode == \"decrypt\":\r\n output = vernam_cipher.decrypt(key, text)\r\n except:\r\n output = \"An error have occured! Please refer to the user guide for what may have caused the error.\"\r\n\r\n return render_template(\"Crypto/cryptography_vernamcipher.html\", title=\"Crypto - Vernam Cipher\", output=output)\r\n\r\n\r\[email protected](\"/cryptography/dhkeyexchange\", methods=[\"GET\", \"POST\"])\r\ndef cryptography_dhkeyexchange():\r\n output = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n n = int(request.form[\"n\"])\r\n g = int(request.form[\"g\"])\r\n x = int(request.form[\"x\"])\r\n y = int(request.form[\"y\"])\r\n assert dhkey_exchange.is_prime(n)\r\n assert dhkey_exchange.is_prime(g)\r\n output = dhkey_exchange.generate_key(n, g, x, y)\r\n except AssertionError:\r\n output = \"An error have occured! Either 'n' or 'g' is not a prime number.\"\r\n except:\r\n output = \"An error have occured! 
Please refer to the user guide for what may have caused the error.\"\r\n\r\n return render_template(\"Crypto/cryptography_dhkeyexchange.html\", title=\"Crypto - Diffie-Hellman Key Exchange\", output=output)\r\n\r\n#\r\n#\r\n#\r\n\r\n\r\[email protected](\"/symmetric\") # Symmetric algorithms\r\ndef symmetric():\r\n return render_template(\"symmetric/symmetric.html\", title=\"Symmetric Algorithms\")\r\n\r\n\r\[email protected](\"/symmetric/AES\", methods=[\"GET\", \"POST\"])\r\ndef symmetric_aes():\r\n output = \"\"\r\n key = \"\"\r\n iv = \"\"\r\n if request.method == \"POST\":\r\n try:\r\n method = request.form[\"method\"]\r\n mode = request.form[\"mode\"]\r\n\r\n if method == \"encrypt\":\r\n key = int(request.form[\"key\"])\r\n key = aes.get_random_key(key)\r\n text = request.form[\"text\"].encode(\"utf-8\")\r\n\r\n if mode == \"ECB\":\r\n output = aes.encrypt_ecb(key, text)\r\n elif mode == \"CBC\":\r\n iv, output = aes.encrypt_cbc(key, text)\r\n elif mode == \"CFB\":\r\n iv, output = aes.encrypt_cfb(key, text)\r\n elif mode == \"OFB\":\r\n iv, output = aes.encrypt_ofb(key, text)\r\n\r\n elif method == \"decrypt\":\r\n key = request.form[\"key\"]\r\n key = aes.convert_to_bin(key)\r\n iv = request.form[\"iv\"]\r\n iv = aes.convert_to_bin(iv)\r\n text = request.form[\"text\"]\r\n text = aes.convert_to_bin(text)\r\n\r\n if mode == \"ECB\":\r\n output = aes.decrypt_ecb(key, text)\r\n elif mode == \"CBC\":\r\n output = aes.decrypt_cbc(key, iv, text)\r\n elif mode == \"CFB\":\r\n output = aes.decrypt_cfb(key, iv, text)\r\n elif mode == \"OFB\":\r\n output = aes.decrypt_ofb(key, iv, text)\r\n\r\n output = aes.convert_to_str(output)\r\n return render_template(\"symmetric/symmetric_aes.html\", title=\"Symmetric Algorithms - AES\", output=output)\r\n\r\n except:\r\n output = \"An error have occured! 
Please refer to the user guide for what may have caused the error.\"\r\n return render_template(\"symmetric/symmetric_aes.html\", title=\"Symmetric Algorithms - AES\", output=output)\r\n\r\n output = aes.convert_to_str(output)\r\n key = aes.convert_to_str(key)\r\n iv = aes.convert_to_str(iv)\r\n return render_template(\"symmetric/symmetric_aes.html\", title=\"Symmetric Algorithms - AES\", output=output, key=key, iv=iv)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n" }, { "alpha_fraction": 0.5773195624351501, "alphanum_fraction": 0.592783510684967, "avg_line_length": 31.7391300201416, "blob_id": "e31de97d50b6be7a15b8c80f0382368c8ce6288a", "content_id": "067a05bbc7b955360e003e72490ebe13d5ef3350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 776, "license_type": "no_license", "max_line_length": 94, "num_lines": 23, "path": "/CryptoPackage/column_cipher_test.py", "repo_name": "Celestares/CrPYtoLearn", "src_encoding": "UTF-8", "text": "from CryptoPackage import column_cipher\r\n\r\n\r\ndef run_test():\r\n\r\n print(\"\\n\\nSimple Columnar Transposition cipher test\")\r\n print(\"---------------------------------------------\")\r\n\r\n col1 = 4\r\n col2 = 7\r\n key = \"RAINBOW\"\r\n string = \"The quick brown fox jumps over the lazy dog.\"\r\n\r\n ciphertext = column_cipher.encrypt(col1, \"\", string)\r\n print(f\"Ciphertext (4 columns, no key) : {ciphertext}\")\r\n print(f\"Plaintext (4 columns, no key) : {column_cipher.decrypt(col1, '', ciphertext)}\")\r\n ciphertext = column_cipher.encrypt(col2, key, string)\r\n print(f\"Ciphertext (7 columns, with key) : {ciphertext}\")\r\n print(f\"Plaintext (7 columns, with key) : {column_cipher.decrypt(col2, key, ciphertext)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_test()\r\n" } ]
17
Kepler1987/pruebagotcha
https://github.com/Kepler1987/pruebagotcha
a659bc48c25d7cee12e5b62fa776c323d1ff51ed
e2c3b33375f687d9765ee445252eaa41aaf8bfc3
09d98fd4c1ec720c2f5c7fa0875643f33b9616eb
refs/heads/master
2020-03-29T14:47:29.312089
2018-09-23T23:04:38
2018-09-23T23:04:38
150,032,817
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6835852861404419, "alphanum_fraction": 0.6835852861404419, "avg_line_length": 50.5, "blob_id": "a3f119da801e1e7e91a702a206d3aa6bc076b258", "content_id": "50c9e4453c4556797a7bbfc101a996db3931e737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 164, "num_lines": 18, "path": "/GotchaDiablo/galeria/urls.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import FotosListView, FotosDetailView, FotosAgrega, FotosUpdate, FotosDelete, VideosListView, VideosDetailView, VideosAgrega, VideosUpdate, VideosDelete\n\nfotos_patterns = ([\n\tpath('', FotosListView.as_view(), name='fotos'),\n\tpath('<int:pk>/<slug:slug>/', FotosDetailView.as_view(), name='foto'),\n path('agrega/', FotosAgrega.as_view(), name='agrega'),\n path('actualizar/<int:pk>/', FotosUpdate.as_view(), name='actualizar'),\n path('eliminar/<int:pk>/', FotosDelete.as_view(), name='eliminar'),\n], 'fotos')\n\nvideos_patterns = ([\n\tpath('', VideosListView.as_view(), name='videos'),\n\tpath('<int:pk>/<slug:slug>/', VideosDetailView.as_view(), name='video'),\n path('agrega/', VideosAgrega.as_view(), name='agrega'),\n path('actualizar/<int:pk>/', VideosUpdate.as_view(), name='actualizar'),\n path('eliminar/<int:pk>/', VideosDelete.as_view(), name='eliminar'),\n], 'videos')" }, { "alpha_fraction": 0.651326060295105, "alphanum_fraction": 0.6583463549613953, "avg_line_length": 52.45833206176758, "blob_id": "c1af0ba51a51849f0bc79564dee472700c6c7ee9", "content_id": "e8e6c515e56261097f44cf65ff222e3e3343ba64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 117, "num_lines": 24, "path": "/GotchaDiablo/registration/templates/registration/login.html", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": 
"UTF-8", "text": "{% extends 'core/base.html' %}\n{% load static %}\n{% block titulo %}Iniciar sesiรณn{% endblock %}\n{% block contenido %}\n<style>.errorlist{color:red;} </style>\n<div id=\"cuerpo\" class=\"text-center text-white\">\n <form class=\"form-signin\" method=\"post\">{% csrf_token %}\n <img class=\"mb-4\" src=\"{% static 'core/img/mascara.png' %}\" alt=\"\" width=\"77\" height=\"85\">\n {% if 'register' in request.GET %}\n <p style=\"color: green;\">Usuario registrado correctamente, ya puedes iniciar session</p>\n {% endif %}\n <h1 class=\"h3 mb-3 font-weight-normal\">Por favor ingresa</h1>\n {% if form.non_field_errors %}\n <p style=\"color:red\">Usuario o contraseรฑa incorrectos, prueba de nuevo.</p>\n {% endif %}\n <label for=\"username\" class=\"sr-only\">Usuario</label>\n <input type=\"text\" id=\"id_username\" name=\"username\" class=\"form-control\" placeholder=\"Usuario\" required >\n <label for=\"password\" class=\"sr-only\">Contraseรฑa</label>\n <input type=\"password\" id=\"id_password\" name=\"password\" class=\"form-control\" placeholder=\"Contrasena\" required>\n <input type=\"submit\" class=\"btn btn-lg btn-outline-primary btn-block\" value=\"Entrar\">\n <p>ยฟHa olvidado su contraseรฑa?</br>Puede recuperarla <a href=\"{% url 'password_reset' %}\">รกqui</a>.</p>\n </form>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.5597484111785889, "alphanum_fraction": 0.6247379183769226, "avg_line_length": 24.105262756347656, "blob_id": "262a6b7eae5b25d7f02f78ee084f658a4c19c34c", "content_id": "15921165773a5e47cc8fdef8d4176f3eb70f5bbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 123, "num_lines": 19, "path": "/GotchaDiablo/galeria/migrations/0004_auto_20180917_0731.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-09-17 12:31\n\nfrom django.db import migrations, 
models\nimport galeria.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('galeria', '0003_auto_20180917_0731'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='foto',\n name='foto',\n field=models.ImageField(blank=True, null=True, upload_to=galeria.models.custom_upload_to, verbose_name='Foto'),\n ),\n ]\n" }, { "alpha_fraction": 0.5503292679786682, "alphanum_fraction": 0.5559736490249634, "avg_line_length": 36.96428680419922, "blob_id": "370db3894a27fdd43ede1a5ae02e1602bbaeda22", "content_id": "1ff2da36798f29a6362d52afc71ff6f506531fdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 100, "num_lines": 28, "path": "/GotchaDiablo/galeria/forms.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django import forms \nfrom .models import Foto, Video\n\nclass FotoForm(forms.ModelForm):\n class Meta:\n model = Foto\n fields = ('foto', 'titulo', 'descripcion')\n widgets = {\n \t'foto': forms.ClearableFileInput(attrs={'class':'form-control-file mt-3'}),\n 'titulo': forms.TextInput(attrs={'class':'form-control mt-3', 'placeholder':'Titulo'}), \n 'descripcion': forms.Textarea(attrs={'class':'form-control mt-3'}),\n }\n labels = {\n \t'foto':'', 'titulo':'', 'descripcion':''\n }\n\nclass VideoForm(forms.ModelForm):\n class Meta:\n model = Video\n fields = ('video', 'titulo', 'descripcion')\n widgets = {\n 'video': forms.ClearableFileInput(attrs={'class':'form-control-file mt-3'}),\n 'titulo': forms.TextInput(attrs={'class':'form-control mt-3', 'placeholder':'Titulo'}), \n 'descripcion': forms.Textarea(attrs={'class':'form-control mt-3'}),\n }\n labels = {\n 'video':'', 'titulo':'', 'descripcion':''\n }\n" }, { "alpha_fraction": 0.7371202111244202, "alphanum_fraction": 0.7490092515945435, "avg_line_length": 33.272727966308594, "blob_id": "d82e98e8dd6cb520f675a6b6e1b52798c2b4773e", "content_id": 
"86ea174c5280cbb1b95f9a8233316f4beddf101c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/GotchaDiablo/paquetes/models.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Paquete(models.Model):\n\t\n\ttitulo = models.CharField(verbose_name='Titulo', max_length=100)\n\tequipo = models.CharField(verbose_name='Equipo', max_length=250)\n\tbolas = models.IntegerField(verbose_name='Balas')\n\tprecio = models.IntegerField(verbose_name='Precio')\n\tdescripcion = models.TextField(verbose_name='Descripcion')\n\tvalido = models.CharField(verbose_name='Valides', max_length=100)\n\tcreated = models.DateTimeField(verbose_name='Fecha de Creaciรณn', auto_now_add=True)\n\tupdated = models.DateTimeField(verbose_name='Fecha de Ediciรณn', auto_now=True)\n\n\n\tclass Meta:\n\t\tverbose_name='paquete'\n\t\tverbose_name_plural='paquetes'\n\t\tordering=['created']\n\n\tdef __str__(self):\n\t\treturn self.titulo\n\t\t\t" }, { "alpha_fraction": 0.5523918271064758, "alphanum_fraction": 0.5580865740776062, "avg_line_length": 34.15999984741211, "blob_id": "effeb216b5d0996f0a7d7e5e57d9658ba279c087", "content_id": "e726074cddaa6b11f3933c56e401e6e24423a2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 879, "license_type": "no_license", "max_line_length": 114, "num_lines": 25, "path": "/GotchaDiablo/paquetes/templates/paquetes/paquete_detail.html", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "{% extends 'core/base.html' %}\n{% load static %}\n{% block titulo %}{{paquete.titulo}}{% endblock %}\n{% block contenido %}\n{% include 'paquetes/includes/paquetes_menu.html'%}\n<main role=\"main\">\n <div class=\"container text-white\">\n <div class=\"row mt-3\">\n <div class=\"col-md-9 
mx-auto\">\n <h2 class=\"section-heading mb-4 text-center\">{{paquete.titulo}}</h2>\n <div>\n Equipo: {{paquete.equipo|safe}}<br>\n Balas: {{paquete.bolas|safe}}<br>\n Precio: ${{paquete.precio|safe}}<br>\n Descripciรณn: {{paquete.descripcion|safe}}<br>\n Valides: {{paquete.valido|safe}}<hr>\n {% if request.user.is_staff %}\n <p><a class=\"btn btn-outline-primary\" href=\"{% url 'paquetes:actualizar' paquete.id %}\">Editar</a></p>\n {% endif %}\n </div>\n </div>\n </div>\n </div>\n</main>\n{% endblock %}" }, { "alpha_fraction": 0.7769784331321716, "alphanum_fraction": 0.7769784331321716, "avg_line_length": 31.102563858032227, "blob_id": "c860dea7bcaa08b6491acc651b73ec0c77644c34", "content_id": "69cae50db8fb3a091e1bc9c99b4967fc381452a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1251, "license_type": "no_license", "max_line_length": 78, "num_lines": 39, "path": "/GotchaDiablo/paquetes/views.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.utils import timezone\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom .models import Paquete\nfrom .forms import PaqueteForm\n\n\n\n# Create your views here.\nclass PaqueteListView(ListView):\n model = Paquete\n\nclass PaqueteDetailView(DetailView):\n\tmodel = Paquete\n\n@method_decorator(staff_member_required, name='dispatch')\nclass PaqueteCreate(CreateView):\n model = Paquete\n form_class = PaqueteForm\n success_url = reverse_lazy('paquetes:paquetes')\n\n@method_decorator(staff_member_required, name='dispatch')\nclass PaqueteUpdate(UpdateView):\n model = Paquete\n form_class = PaqueteForm\n 
template_name_suffix = '_update_form'\n\n def get_success_url(self):\n \treturn reverse_lazy('paquetes:actualizar', args=[self.object.id]) + '?ok'\n\n@method_decorator(staff_member_required, name='dispatch')\nclass PaqueteDelete(DeleteView):\n model = Paquete\n success_url = reverse_lazy('paquetes:paquetes')" }, { "alpha_fraction": 0.7053045034408569, "alphanum_fraction": 0.7053045034408569, "avg_line_length": 50, "blob_id": "8161939d83643394c61c9a11e2035e1ba75b341e", "content_id": "d82f6905affe3167bf268a45417d1a5840912f13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 98, "num_lines": 10, "path": "/GotchaDiablo/paquetes/urls.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import PaqueteListView, PaqueteDetailView, PaqueteCreate, PaqueteUpdate, PaqueteDelete\n\npaquetes_patterns = ([\n\tpath('', PaqueteListView.as_view(), name='paquetes'),\n\tpath('<int:pk>/<slug:slug>/', PaqueteDetailView.as_view(), name='paquete'),\n path('crear/', PaqueteCreate.as_view(), name='crear'),\n path('actualizar/<int:pk>/', PaqueteUpdate.as_view(), name='actualizar'),\n path('eliminar/<int:pk>/', PaqueteDelete.as_view(), name='eliminar'),\n], 'paquetes')" }, { "alpha_fraction": 0.7860465049743652, "alphanum_fraction": 0.7860465049743652, "avg_line_length": 23, "blob_id": "cabcc649575382c5d268c59c0e61022b60b7e5ca", "content_id": "f3e7bfa25f8af163e0535828282648eb33d30035", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/GotchaDiablo/paquetes/admin.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Paquete\n\n# Register your models here.\nclass 
PaqueteAdmin(admin.ModelAdmin):\n\treadonly_fields = ('created', 'updated')\n\n\nadmin.site.register(Paquete, PaqueteAdmin)" }, { "alpha_fraction": 0.5613126158714294, "alphanum_fraction": 0.5768566727638245, "avg_line_length": 36.35483932495117, "blob_id": "341fea0b3ae513b69d4b4d14accbf6f329a69ca7", "content_id": "2db7a27e7914c9b9dce412897329b2c6184e7940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1160, "license_type": "no_license", "max_line_length": 132, "num_lines": 31, "path": "/GotchaDiablo/galeria/migrations/0001_initial.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-09-17 12:14\n\nfrom django.db import migrations, models\nimport galeria.models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Foto',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('foto', models.ImageField(blank=True, null=True, upload_to='fotos', verbose_name=galeria.models.custom_upload_to)),\n ('titulo', models.CharField(blank=True, max_length=100, null=True, verbose_name='Titulo')),\n ('descripcion', models.TextField(blank=True, null=True, verbose_name='Descripcion')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de Creaciรณn')),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha de Ediciรณn')),\n ],\n options={\n 'verbose_name': 'foto',\n 'verbose_name_plural': 'fotos',\n 'ordering': ['created'],\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5316455960273743, "alphanum_fraction": 0.5806962251663208, "avg_line_length": 26.478260040283203, "blob_id": "d13ace1a848862e4c9c7a4853240645f445137b0", "content_id": "c81392bfa098356d6f68cc19cb91e9283579aa41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, 
"license_type": "no_license", "max_line_length": 100, "num_lines": 23, "path": "/GotchaDiablo/galeria/migrations/0005_auto_20180917_1007.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-09-17 15:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('galeria', '0004_auto_20180917_0731'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='foto',\n name='foto',\n field=models.ImageField(blank=True, null=True, upload_to='fotos', verbose_name='Foto'),\n ),\n migrations.AlterField(\n model_name='video',\n name='video',\n field=models.FileField(blank=True, null=True, upload_to='videos', verbose_name='Video'),\n ),\n ]\n" }, { "alpha_fraction": 0.7508196830749512, "alphanum_fraction": 0.7508196830749512, "avg_line_length": 29.600000381469727, "blob_id": "f5bd84b8e86187e08d0457173fcdf61d9f4af04b", "content_id": "69b43ee27609197c23ee66b811ddae8c7fd6f728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 73, "num_lines": 10, "path": "/GotchaDiablo/reservaciones/admin.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Reservacion\n\n# Register your models here.\nclass ReservacionAdmin(admin.ModelAdmin):\n\treadonly_fields = ('created', 'updated')\n\tfields = ('user', 'nombre', 'pub_date', 'correo', 'telefono', 'paquete')\n\n\nadmin.site.register(Reservacion, ReservacionAdmin)" }, { "alpha_fraction": 0.7965116500854492, "alphanum_fraction": 0.7965116500854492, "avg_line_length": 27.75, "blob_id": "d1c232aea49a7dcd8481af9446cc416f6e06bff4", "content_id": "e488cbc7bcf7620789396cfc58a21a1ea972efaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 
50, "num_lines": 12, "path": "/GotchaDiablo/core/views.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.views.generic.base import TemplateView\nfrom django.views.generic.list import ListView\nfrom django.shortcuts import render\nfrom paquetes.models import Paquete\n\n\nclass InicioListView(ListView):\n model = Paquete\n template_name = \"core/inicio.html\"\n\nclass UbicacionPageView(TemplateView):\n template_name = \"core/ubicacion.html\"" }, { "alpha_fraction": 0.7461203932762146, "alphanum_fraction": 0.7498447895050049, "avg_line_length": 35.6136360168457, "blob_id": "92d21cee7c29c687fcf2936d61355d93bb7a4c9c", "content_id": "cd31a5c83fecd828314877cde006fcdfefe654df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 90, "num_lines": 44, "path": "/GotchaDiablo/galeria/models.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\n\ndef custom_upload_to(instance, filename):\n\told_instance = Foto.objects.get(pk=instance.pk)\n\told_instance.foto.delete()\n\treturn 'fotos/'+ filename\n\ndef customv_upload_to(instance, filename):\n\told_instance = Video.objects.get(pk=instance.pk)\n\told_instance.video.delete()\n\treturn 'videos/'+ filename\n\n# Create your models here.\nclass Foto(models.Model):\n\tfoto = models.ImageField(upload_to='fotos', verbose_name='Foto', null=True, blank=True)\n\ttitulo = models.CharField(verbose_name='Titulo', max_length=100, null=True, blank=True)\n\tdescripcion = models.TextField(verbose_name='Descripcion', null=True, blank=True)\n\tcreated = models.DateTimeField(verbose_name='Fecha de Creaciรณn', auto_now_add=True)\n\tupdated = models.DateTimeField(verbose_name='Fecha de Ediciรณn', auto_now=True)\n\n\tclass 
Meta:\n\t\tverbose_name='foto'\n\t\tverbose_name_plural='fotos'\n\t\tordering=['created']\n\n\tdef __str__(self):\n\t\treturn self.titulo\n\nclass Video(models.Model):\n\tvideo = models.FileField(verbose_name='Video', upload_to='videos', null=True, blank=True)\n\ttitulo = models.CharField(verbose_name='Titulo', max_length=100, null=True, blank=True)\n\tdescripcion = models.TextField(verbose_name='Descripcion', null=True, blank=True)\n\tcreated = models.DateTimeField(verbose_name='Fecha de Creaciรณn', auto_now_add=True)\n\tupdated = models.DateTimeField(verbose_name='Fecha de Ediciรณn', auto_now=True)\n\n\tclass Meta:\n\t\tverbose_name='video'\n\t\tverbose_name_plural='videos'\n\t\tordering=['created']\n\n\tdef __str__(self):\n\t\treturn self.titulo\n" }, { "alpha_fraction": 0.7480000257492065, "alphanum_fraction": 0.7480000257492065, "avg_line_length": 34.85714340209961, "blob_id": "1072fa54e73876e698097355e0ab5014b370e66a", "content_id": "066534c2cbfebfe817a09fea9e98ba5ec2158989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 80, "num_lines": 7, "path": "/GotchaDiablo/reservaciones/urls.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import Reservaciones, ReservacionListView\n\nreservacion_patterns = [\n path('', Reservaciones.as_view(), name=\"reservacion\"),\n path('reservaciones/', ReservacionListView.as_view(), name=\"reservaciones\"),\n]" }, { "alpha_fraction": 0.5871964693069458, "alphanum_fraction": 0.5871964693069458, "avg_line_length": 49.38888931274414, "blob_id": "059bcc0c8db43255249d4296bbcb0caf35019fc9", "content_id": "d179ea3a7f295e4638cd93dc6c3878ffc738409c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 96, "num_lines": 18, "path": 
"/GotchaDiablo/paquetes/forms.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django import forms \nfrom .models import Paquete\n\nclass PaqueteForm(forms.ModelForm):\n class Meta:\n model = Paquete\n fields = ('titulo', 'equipo', 'bolas', 'precio', 'descripcion', 'valido')\n widgets = {\n \t'titulo': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Titulo'}),\n \t'equipo': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Equipo'}), \n \t'bolas': forms.NumberInput(attrs={'class':'form-control', 'placeholder':'Balas'}),\n 'precio': forms.NumberInput(attrs={'class':'form-control', 'placeholder':'Precio'}),\n 'descripcion': forms.Textarea(attrs={'class':'form-control'}),\n 'valido': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Valides'}),\n }\n labels = {\n \t'titulo':'', 'equipo':'', 'bolas':'', 'precio':'', 'descripcion':'', 'valido':''\n }" }, { "alpha_fraction": 0.541829526424408, "alphanum_fraction": 0.5605942010879517, "avg_line_length": 37.75757598876953, "blob_id": "a42c9332e806f94ffe1fa51f05a17d8c3fb469a2", "content_id": "94348276787516b9ed6b093c9829bf71b6e89960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 114, "num_lines": 33, "path": "/GotchaDiablo/paquetes/migrations/0001_initial.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-09-17 12:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Paquete',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('titulo', models.CharField(max_length=100, verbose_name='Titulo')),\n ('equipo', models.CharField(max_length=250, verbose_name='Equipo')),\n ('bolas', 
models.IntegerField(verbose_name='Balas')),\n ('precio', models.IntegerField(verbose_name='Precio')),\n ('descripcion', models.TextField(verbose_name='Descripcion')),\n ('valido', models.CharField(max_length=100, verbose_name='Valides')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de Creaciรณn')),\n ('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha de Ediciรณn')),\n ],\n options={\n 'verbose_name': 'paquete',\n 'verbose_name_plural': 'paquetes',\n 'ordering': ['created'],\n },\n ),\n ]\n" }, { "alpha_fraction": 0.7625979781150818, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 37.78260803222656, "blob_id": "a3f802c1c426b1294c6370a837fc5fc4a59d10b8", "content_id": "a707ae64f6c9813a54a44ab5ae00cd708a757a13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 896, "license_type": "no_license", "max_line_length": 86, "num_lines": 23, "path": "/GotchaDiablo/reservaciones/models.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom paquetes.models import Paquete\n\n# Create your models here.\nclass Reservacion(models.Model):\n\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)\n\tnombre = models.CharField(verbose_name='Nombre', max_length=250) \n\tpub_date = models.DateField(verbose_name='Fecha de Reservaciรณn') \n\tcorreo = models.EmailField(verbose_name='Correo')\n\ttelefono = models.IntegerField(verbose_name='Telefono')\n\tpaquete = models.ForeignKey(Paquete, blank=True, null=True, on_delete=models.CASCADE)\n\tcreated = models.DateTimeField(verbose_name='Fecha de Creaciรณn', auto_now_add=True)\n\tupdated = models.DateTimeField(verbose_name='Fecha de Ediciรณn', auto_now=True)\n\n\tclass Meta:\n\t\tverbose_name='reservacion'\n\t\tverbose_name_plural='reservaciones'\n\t\tordering=['created']\n\n\tdef 
__str__(self):\n\t\treturn self.correo\n\n" }, { "alpha_fraction": 0.6764386296272278, "alphanum_fraction": 0.6764386296272278, "avg_line_length": 30.79310417175293, "blob_id": "b907bd6fa74e29df05cd2933e0a188dbf44cecd7", "content_id": "23092f6442ae6f32cbffe5231b1fce464a308db9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "no_license", "max_line_length": 77, "num_lines": 29, "path": "/GotchaDiablo/contacto/views.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom .forms import Contactform\nfrom django.core.mail import EmailMessage\nfrom django.urls import reverse\n\n# Create your views here.\ndef contacto(request):\n\tcontact_form = Contactform()\n\tif request.method == \"POST\":\n\t\tcontact_form = Contactform(data=request.POST)\n\t\tif contact_form.is_valid():\n\t\t\tname = request.POST.get('name', '')\n\t\t\temail = request.POST.get('email', '')\n\t\t\tcontent = request.POST.get('content', '')\n\t\t\t#senviamos el correo y direccionamos\n\t\t\temail = EmailMessage(\n\t\t\t\t\"Gotcha Diablo: Nuevo mensaje de contacto\",\n\t\t\t\t\"De: {} <{}> \\n\\nEscribio:\\n\\n{}\".format(name, email, content),\n\t\t\t\t\"[email protected]\",\n\t\t\t\t[\"[email protected]\"],\n\t\t\t\treply_to=[email]\n\t\t\t)\n\t\t\ttry:\n\t\t\t\temail.send()\n\t\t\t\treturn redirect(reverse('contacto')+\"?ok\")\n\t\t\texcept Exception as e:\n\t\t\t\treturn redirect(reverse('contacto')+\"?fail\")\n\n\treturn render(request, \"contacto/contacto.html\",{'formulario':contact_form})" }, { "alpha_fraction": 0.7672688364982605, "alphanum_fraction": 0.7693942785263062, "avg_line_length": 32.64285659790039, "blob_id": "c7ba8ac06ea2d6554453793c5fcb88a00129be62", "content_id": "1667c0a7de5490ff9272c880f0c31222affd21f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": 
"no_license", "max_line_length": 71, "num_lines": 28, "path": "/GotchaDiablo/reservaciones/views.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.utils import timezone\nfrom django.views.generic.edit import CreateView\nfrom django.views.generic.list import ListView\nfrom .models import Reservacion \nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom .forms import ReservacionForm\n\n# Create your views here.\n\nclass Reservaciones(CreateView):\n\tmodel = Reservacion\n\tform_class = ReservacionForm\n\ttemplate_name = 'reservaciones/crear_reservacion.html'\n\tsuccess_url = reverse_lazy('reservacion')\n \n@method_decorator(staff_member_required, name='dispatch')\nclass ReservacionListView(ListView):\n\n model = Reservacion\n paginate_by = 20 # if pagination is desired\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['now'] = timezone.now()\n return context" }, { "alpha_fraction": 0.5951275825500488, "alphanum_fraction": 0.6009280681610107, "avg_line_length": 49.764705657958984, "blob_id": "04e13befaa5341fee73e937460fac25bf364c910", "content_id": "e6fe312382ace3b2159cb9293861fe73b193e346", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 112, "num_lines": 17, "path": "/GotchaDiablo/reservaciones/forms.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django import forms \nfrom .models import Reservacion\n\nclass ReservacionForm(forms.ModelForm):\n class Meta:\n model = Reservacion\n fields = ('nombre', 'pub_date', 'correo', 'telefono', 'paquete')\n widgets = {\n \t'nombre': forms.TextInput(attrs={'class':'form-control mb-5', 'placeholder':'Nombre'}),\n \t'pub_date': 
forms.DateInput(attrs={'class':'form-control mb-5', 'placeholder':'Fecha de reservaciรณn'}),\n \t'correo': forms.EmailInput(attrs={'class':'form-control mb-5', 'placeholder':'Correo'}), \n \t'telefono': forms.NumberInput(attrs={'class':'form-control mb-5', 'placeholder':'Telefono'}),\n 'paquete': forms.Select(attrs={'class':'form-control mb-5', 'placeholder':'Paquete'}),\n }\n labels = {\n \t'nombre':'','pub_date':'', 'correo':'', 'telefono':'', 'paquete':''\n }" }, { "alpha_fraction": 0.7256637215614319, "alphanum_fraction": 0.7256637215614319, "avg_line_length": 31.428571701049805, "blob_id": "80a20840e935b340f285de906eccbc1b0f5b946e", "content_id": "7a4a3d66ce02f117e54cc513f1803653f4211a84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 70, "num_lines": 7, "path": "/GotchaDiablo/core/urls.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import InicioListView, UbicacionPageView\n\nurlpatterns = [\n path('', InicioListView.as_view(), name=\"inicio\"),\n path('ubicacion/', UbicacionPageView.as_view(), name=\"ubicacion\"),\n]" }, { "alpha_fraction": 0.739047646522522, "alphanum_fraction": 0.741428554058075, "avg_line_length": 29, "blob_id": "1b54efb4e15dd2e1ba11c38766fb6d986f376795", "content_id": "353e729f470046db27973dfd703357af114f70ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2100, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/GotchaDiablo/galeria/views.py", "repo_name": "Kepler1987/pruebagotcha", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom 
django.contrib.admin.views.decorators import staff_member_required\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom .models import Foto, Video\nfrom .forms import FotoForm, VideoForm\n\n# Create your views here.\nclass FotosListView(ListView):\n model = Foto\n template_name = 'galeria/foto_list.html'\n paginate_by = 9\n\nclass FotosDetailView(DetailView):\n model = Foto\n\n@method_decorator(staff_member_required, name='dispatch')\nclass FotosAgrega(CreateView):\n model = Foto\n form_class = FotoForm\n success_url = reverse_lazy('fotos:fotos')\n\n@method_decorator(staff_member_required, name='dispatch')\nclass FotosUpdate(UpdateView):\n model = Foto\n form_class = FotoForm\n template_name_suffix = '_update_form'\n\n def get_success_url(self):\n \treturn reverse_lazy('fotos:actualizar', args=[self.object.id]) + '?ok'\n\n@method_decorator(staff_member_required, name='dispatch')\nclass FotosDelete(DeleteView):\n model = Foto\n success_url = reverse_lazy('fotos:fotos')\n\n\n\n\nclass VideosListView(ListView):\n model = Video\n template_name = 'galeria/video_list.html'\n paginate_by = 9\n\nclass VideosDetailView(DetailView):\n model = Video\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VideosAgrega(CreateView):\n model = Video\n form_class = VideoForm\n success_url = reverse_lazy('videos:videos')\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VideosUpdate(UpdateView):\n model = Video\n form_class = VideoForm\n template_name_suffix = '_update_form'\n\n def get_success_url(self):\n \treturn reverse_lazy('videos:actualizar', args=[self.object.id]) + '?ok'\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VideosDelete(DeleteView):\n model = Video\n success_url = reverse_lazy('videos:videos')\n" } ]
23
PKlimo/CryptoChallenge
https://github.com/PKlimo/CryptoChallenge
680f0c959aaf5a873010453edba304afa124aef7
06456565f3046d8818279039ad6e5a08994a3266
b0a34535b9ec09ec7a8265d1fc11f6a22af1fdde
refs/heads/master
2021-01-18T21:40:15.728110
2016-05-28T11:30:38
2016-05-28T11:30:38
50,387,493
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4524536728858948, "alphanum_fraction": 0.5540623068809509, "avg_line_length": 38.28799819946289, "blob_id": "0f7501cf9656f885cb00ba926985076ee3ab1edc", "content_id": "ff9c76b6c0a434aeab6e026e01e018e5bda3dcab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4911, "license_type": "no_license", "max_line_length": 541, "num_lines": 125, "path": "/set6/ch44/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Hash import SHA\nfrom Crypto.Util import number\nfrom Crypto.Random import random\nfrom collections import defaultdict\ndebug = True\np = number.bytes_to_long(b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x89\\xe1\\x85\\x52\\x18\\xa0\\xe7\\xda\\xc3\\x81\\x36\\xff\\xaf\\xa7\\x2e\\xda\\x78\\x59\\xf2\\x17\\x1e\\x25\\xe6\\x5e\\xac\\x69\\x8c\\x17\\x02\\x57\\x8b\\x07\\xdc\\x2a\\x10\\x76\\xda\\x24\\x1c\\x76\\xc6\\x2d\\x37\\x4d\\x83\\x89\\xea\\x5a\\xef\\xfd\\x32\\x26\\xa0\\x53\\x0c\\xc5\\x65\\xf3\\xbf\\x6b\\x50\\x92\\x91\\x39\\xeb\\xea\\xc0\\x4f\\x48\\xc3\\xc8\\x4a\\xfb\\x79\\x6d\\x61\\xe5\\xa4\\xf9\\xa8\\xfd\\xa8\\x12\\xab\\x59\\x49\\x42\\x32\\xc7\\xd2\\xb4\\xde\\xb5\\x0a\\xa1\\x8e\\xe9\\xe1\\x32\\xbf\\xa8\\x5a\\xc4\\x37\\x4d\\x7f\\x90\\x91\\xab\\xc3\\xd0\\x15\\xef\\xc8\\x71\\xa5\\x84\\x47\\x1b\\xb1')\nq = number.bytes_to_long(b'\\xf4\\xf4\\x7f\\x05\\x79\\x4b\\x25\\x61\\x74\\xbb\\xa6\\xe9\\xb3\\x96\\xa7\\x70\\x7e\\x56\\x3c\\x5b')\ng = 
number.bytes_to_long(b'\\x59\\x58\\xc9\\xd3\\x89\\x8b\\x22\\x4b\\x12\\x67\\x2c\\x0b\\x98\\xe0\\x6c\\x60\\xdf\\x92\\x3c\\xb8\\xbc\\x99\\x9d\\x11\\x94\\x58\\xfe\\xf5\\x38\\xb8\\xfa\\x40\\x46\\xc8\\xdb\\x53\\x03\\x9d\\xb6\\x20\\xc0\\x94\\xc9\\xfa\\x07\\x7e\\xf3\\x89\\xb5\\x32\\x2a\\x55\\x99\\x46\\xa7\\x19\\x03\\xf9\\x90\\xf1\\xf7\\xe0\\xe0\\x25\\xe2\\xd7\\xf7\\xcf\\x49\\x4a\\xff\\x1a\\x04\\x70\\xf5\\xb6\\x4c\\x36\\xb6\\x25\\xa0\\x97\\xf1\\x65\\x1f\\xe7\\x75\\x32\\x35\\x56\\xfe\\x00\\xb3\\x60\\x8c\\x88\\x78\\x92\\x87\\x84\\x80\\xe9\\x90\\x41\\xbe\\x60\\x1a\\x62\\x16\\x6c\\xa6\\x89\\x4b\\xdd\\x41\\xa7\\x05\\x4e\\xc8\\x9f\\x75\\x6b\\xa9\\xfc\\x95\\x30\\x22\\x91')\n\n\nclass MyDSASigner:\n def __init__(self):\n self.__x = 42\n self.y = pow(g, self.__x, p)\n\n def sign(self, h, priv_key=None, nonce=None):\n x = self.__x if priv_key is None else priv_key\n\n while True:\n k = random.StrongRandom().randint(1, q-1) if nonce is None else nonce\n r = pow(g, k, p) % q\n s2 = number.inverse(k, q) * (number.bytes_to_long(h) + x * r)\n s = pow(k, q-2, q) * (number.bytes_to_long(h) + x * r)\n assert(s2 == s)\n s = s % q\n if r != 0 and s != 0:\n self.k = k\n return (r, s)\n\n def verify(self, sig, h, pub_key=None):\n y = self.y if pub_key is None else pub_key\n r, s = sig\n if not 0 < r < q:\n return False\n if not 0 < s < q:\n return False\n w = number.inverse(s, q)\n u1 = (number.bytes_to_long(h) * w) % q\n u2 = (r * w) % q\n v = ((pow(g, u1, p) * pow(y, u2, p)) % p) % q\n return v == r\n\n\ndef extract_k(sig1, sig2, h1, h2):\n r1, s1 = sig1\n r2, s2 = sig2\n if r1 != r2:\n print(\"Different k was used\")\n return None\n h1 = int.from_bytes(h1, byteorder='big')\n h2 = int.from_bytes(h2, byteorder='big')\n hdiff = (h1 - h2) % q\n sdiff = (s1 - s2) % q\n k = hdiff * number.inverse(sdiff, q)\n k = k % q\n if debug:\n print(\"k:\", k)\n return k\n\n\ndef test_extract_k():\n print('=== Extract k from two signatures ===')\n myDSA = MyDSASigner()\n x = 42\n k = 57\n h1 = SHA.new(b'test 
message 1').digest()\n h2 = SHA.new(b'test message 2').digest()\n s1 = myDSA.sign(h1, x, k)\n s2 = myDSA.sign(h2, x, k)\n k1 = extract_k(s1, s2, h1, h2)\n k2 = extract_k(s2, s1, h2, h1)\n assert k1 == k2\n\n\ndef load_data():\n print('=== Load and check data from file ===')\n text = []\n import re\n d = defaultdict(list)\n with open(\"44.txt\", \"rt\") as in_file:\n for line in in_file:\n text += [line.strip()]\n for i in range(0, len(text), 4):\n m = re.match('msg: (.*)$', text[i]).group(1) + \" \"\n s = re.match('s: (\\d*)$', text[i+1]).group(1)\n r = re.match('r: (\\d*)$', text[i+2]).group(1)\n h = re.match('m: ([0-9a-f]*)$', text[i+3]).group(1)\n if hex(int.from_bytes(SHA.new(m.encode('utf-8')).digest(), byteorder='big'))[2:] != h:\n print(\"Wrong SHA1 hash h:\", h, 'for m:', m)\n d[int(r)].append((int(s), SHA.new(m.encode('utf-8')).digest()))\n return d\n\n\ndef main():\n x = None\n test_extract_k()\n d = load_data()\n print('=== Computing private key x ===')\n for r in d:\n if len(d[r]) > 1:\n s1, h1 = d[r][0]\n s2, h2 = d[r][1]\n k = extract_k((r, s1), (r, s2), h1, h2)\n hi1 = int.from_bytes(h1, byteorder='big')\n hi2 = int.from_bytes(h2, byteorder='big')\n x1 = ((s1*k - hi1) * number.inverse(r, q)) % q\n x2 = ((s2*k - hi2) * number.inverse(r, q)) % q\n assert x1 == x2\n if x is None:\n x = x1\n else:\n assert x == x1\n print('x:', x)\n print('=== Verifying SHA hash of private key x ===')\n xh = hex(x)[2:]\n ch = SHA.new(xh.encode('ascii')).digest()\n print('SHA-1 hash of x:', hex(int.from_bytes(ch, byteorder='big'))[2:])\n assert hex(int.from_bytes(ch, byteorder='big'))[2:] == \"ca8f6f7c66fa362d40760d135b763eb8527d3d52\"\n print('Hash is OK')\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.41808873414993286, "alphanum_fraction": 0.505972683429718, "avg_line_length": 17.3125, "blob_id": "9327c1faf5d1f6127c4102b601f1a00cb91f25db", "content_id": "f9f28ab4cc9fa35f584da528eb91eff4e57d14f2", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 73, "num_lines": 64, "path": "/set3/ch21/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\n\nmt = [0] * 624\nindex = 624\n\n\ndef print_state():\n from struct import pack\n from binascii import hexlify\n for i in range(0, 624):\n print(hexlify(pack(\">I\", mt[i])).decode(\"ascii\"), end=\"\")\n\n\ndef _int32(i):\n return int(0xFFFFFFFF & i)\n\n\ndef _high_mask(i):\n return int(0x80000000 & i)\n\n\ndef _low_mask(i):\n return int(0x7FFFFFFF & i)\n\n\ndef extract_number():\n global index\n if index >= 624:\n twist()\n y = mt[index]\n\n y ^= (y >> 11)\n y ^= (y << 7) & 0x9D2C5680\n y ^= (y << 15) & 0xEFC60000\n y ^= (y >> 18)\n\n index += 1\n return _int32(y)\n\n\ndef init(seed):\n mt[0] = _int32(seed)\n for i in range(1, 624):\n mt[i] = _int32(int(1812433253) * (mt[i-1] ^ (mt[i-1] >> 30)) + i)\n\n\ndef twist():\n global index\n for i in range(0, 624):\n x = _int32(_high_mask(mt[i]) + _low_mask(mt[(i+1) % 624]))\n xA = x >> 1\n if (x % 2) != 0:\n xA = xA ^ 0x9908B0DF\n mt[i] = mt[(i+397) % 624] ^ xA\n index = 0\n\n\nif __name__ == \"__main__\":\n init(42)\n # twist()\n # print_state()\n # twist()\n print(extract_number())\n" }, { "alpha_fraction": 0.5158483386039734, "alphanum_fraction": 0.5313859581947327, "avg_line_length": 30.54901885986328, "blob_id": "9f69679e51597d96b94964bb08cab433ae844a7a", "content_id": "d3f6557d6fa60fe534dc4329fb30485acc7b5f5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1609, "license_type": "no_license", "max_line_length": 120, "num_lines": 51, "path": "/set3/ch18/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nimport struct # 
pack\nimport sys # stderr\n\nclass CTR:\n def __init__(self, key, nonce):\n self.__key = key\n self.__nonce = nonce\n self.__count = 0\n\n def __xorstrings(self, txt, hes):\n if len(txt) != len(hes):\n print(\"Error xorstrings:\\nlength of text:\"+str(len(txt))+\"\\nlength of pass:\"+str(len(hes)), file=sys.stderr)\n return \"\"\n else:\n ret = []\n for i in range(len(txt)):\n ret += [txt[i] ^ hes[i]]\n return ret\n\n def __counter(self):\n old_val = struct.pack('<Q', self.__count)\n self.__count += 1\n return old_val\n\n def crypt(self, data):\n obj = AES.new(self.__key, AES.MODE_ECB)\n cdata = []\n for i in range(0, len(data) // 16): # crypt whole block\n pok = self.__nonce+self.__counter()\n keystream = obj.encrypt(pok)\n cdata += self.__xorstrings(data[i*16:(i+1)*16], keystream)\n # crypt incomplete last block\n pok = self.__nonce+self.__counter()\n keystream = obj.encrypt(pok)\n start = (len(data)//16)*16\n for i in range(start, len(data)):\n cdata += [data[i] ^ keystream[i-start]]\n return cdata\n\nif __name__ == \"__main__\":\n ctr = CTR(\"YELLOW SUBMARINE\", b'\\x00'*8)\n\n with open(\"data.b64\", \"rt\") as f:\n lines = f.readlines()\n import base64\n enc = base64.decodebytes(lines[0].encode('ascii'))\n dec = ctr.crypt(enc)\n print(\"\".join([chr(d) for d in dec]))\n" }, { "alpha_fraction": 0.48379969596862793, "alphanum_fraction": 0.5331369638442993, "avg_line_length": 25.115385055541992, "blob_id": "98d3c88e1c832ec7ed98e129a2bde0d05e1cf1e8", "content_id": "fe2fb4945de5a52fcc695c6b0e7b23748be6d460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1358, "license_type": "no_license", "max_line_length": 100, "num_lines": 52, "path": "/set3/ch23/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport mt19937\nimport numpy as np\nimport random\nimport struct\n\n\nclass PlayGround:\n def 
__init__(self):\n self.seed = random.getrandbits(32)\n self.rn = np.random.RandomState(self.seed)\n\n def getNumber(self):\n return struct.unpack(\"<L\", self.rn.bytes(4))[0]\n\n\nclass Sollution:\n def findSeed(self, pg):\n rn = [0] * 624\n tm = [0] * 624\n for i in range(624):\n rn[i] = pg.getNumber()\n tm[i] = mt19937.unextract_number(rn[i], i)\n mt = mt19937.untwist(tm)\n seed = mt19937.uninit(mt)\n return seed\n\n def createState(self, seed):\n mt = mt19937.init(seed)\n mt = mt19937.twist(mt)\n return mt\n\n def check(self, pg, mt, rounds):\n ret = True\n for i in range(624*rounds):\n if i % 624 == 0:\n mt = mt19937.twist(mt)\n pg_r = pg.getNumber()\n mt_r = mt19937.extract_number(mt, i % 624)\n if pg_r != mt_r:\n print(\"[ERROR] round {} wrong prediction: pg_r= {}, mt_r= {}\".format(i, pg_r, mt_r))\n ret = False\n return ret\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n sol = Sollution()\n seed = sol.findSeed(pg)\n mt = sol.createState(seed)\n print(\"Check:\", sol.check(pg, mt, 3))\n" }, { "alpha_fraction": 0.49983009696006775, "alphanum_fraction": 0.5079850554466248, "avg_line_length": 24.8157901763916, "blob_id": "0852ca76b063608c2d4d4a6f78dc3116fb5f7e72", "content_id": "7477f234818ce66ba2fb8a61ef1f0e44a2d585b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2943, "license_type": "no_license", "max_line_length": 73, "num_lines": 114, "path": "/set6/ch41/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\ndebug = True\n\n\ndef invmod(a, b):\n from Crypto.Util import number\n return number.inverse(a, b)\n\n\nclass RSA:\n def generate_keys(self, e):\n len = 1024\n if debug:\n len = 16 # DEBUG\n from Crypto.Util import number\n while True:\n p = number.getPrime(len)\n q = number.getPrime(len)\n n = p * q\n et = (p-1)*(q-1)\n d = invmod(e, et)\n if d != 1:\n break\n print(\"Finding 
private/public key\")\n if debug:\n print('n:', n, 'p:', p, 'q:', q, 'd:', d, 'e:', e)\n private_key = (d, n)\n public_key = (e, n)\n return (private_key, public_key)\n\n def encrypt(self, msg, pub_key):\n A, n = pub_key\n if type(msg) == int:\n m = msg\n else:\n m = int.from_bytes(msg, byteorder='big')\n return pow(m, A, n)\n\n def decrypt(self, msg, priv_key):\n a, n = priv_key\n msg = pow(msg, a, n)\n m = msg.to_bytes((msg.bit_length() // 8) + 1, byteorder='big')\n return m\n\n\nclass PlayGround:\n def __init__(self, priv_key, pub_key):\n self.__priv_key = priv_key\n self.pub_key = pub_key\n self.__rsa = RSA()\n self.decrypted = []\n\n def decrypt(self, msg):\n if msg in self.decrypted:\n return None\n else:\n self.decrypted += [msg]\n return self.__rsa.decrypt(msg, self.__priv_key)\n\n\nclass Player:\n def __init__(self, pg):\n self.pg = pg\n self.__msg = 42\n self.__rsa = RSA()\n\n def encrypt(self):\n return self.__rsa.encrypt(self.__msg, self.pg.pub_key)\n\n def test_1_dec(self, cip):\n dec = self.pg.decrypt(cip)\n d = int.from_bytes(dec, byteorder='big')\n if d == self.__msg:\n print('Decryption test passed, msg:', d)\n else:\n print('Decryption failed, msg:', self.__msg, 'decrypted:', d)\n\n def test_2_dec(self, cip):\n dec = self.pg.decrypt(cip)\n if dec is None:\n print(\"Test OK, second decryption shouldn't be allowed\")\n else:\n print(\"Test Failed, second decryption shouldn't be allowed\")\n\n\nclass Atacker:\n def __init__(self, pg):\n self.pg = pg\n self.__rsa = RSA()\n\n def crack(self, cip):\n e = self.pg.pub_key[0]\n N = self.pg.pub_key[1]\n dec = self.pg.decrypt(pow(2, e, N) * cip)\n dec = int.from_bytes(dec, byteorder='big')\n dec = (dec * invmod(2, N)) % N # dec = dec // 2\n print('Cracked message:', dec)\n\n\ndef main():\n rsa = RSA()\n priv_key, pub_key = rsa.generate_keys(7)\n pg = PlayGround(priv_key, pub_key)\n player = Player(pg)\n cip = player.encrypt()\n player.test_1_dec(cip)\n player.test_2_dec(cip)\n atacker = Atacker(pg)\n 
atacker.crack(cip)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7203390002250671, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 77.66666412353516, "blob_id": "b664a98dfa8382f4a039bf6bfe6218bf2d9e41bf", "content_id": "a979ece0d66bce94dba4a6576fec32cd664968cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 236, "license_type": "no_license", "max_line_length": 129, "num_lines": 3, "path": "/set2/ch09/Makefile", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "all:\n#\tgcc -std=c11 -march=native -o crypto_xor_find_more_bytes_key.exe crypto_xor_find_more_bytes_key.c\n\tgcc -std=c11 -Wall -Wextra -pedantic -Werror -Wshadow -Wstrict-overflow -fno-strict-aliasing -march=native -o pkcs7.exe pkcs7.c\n" }, { "alpha_fraction": 0.4969102144241333, "alphanum_fraction": 0.5256270170211792, "avg_line_length": 27.957895278930664, "blob_id": "825d5750920400598147c7fab5ebd218d701f500", "content_id": "6d546392b8a849da161d600722a3588ff3af3f94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2751, "license_type": "no_license", "max_line_length": 123, "num_lines": 95, "path": "/set3/ch17/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\ndebug = True\n\n\ndef print_debug_block(msg, data):\n print(msg)\n for i in range(len(data) // 16):\n print(data[i*16:(i+1)*16])\n\n\nclass PKCS7:\n def __init__(self, base):\n self.__base = base\n\n def encode(self, data):\n p_len = self.__base - (len(data) % self.__base)\n if p_len == 0:\n p_len = self.__base\n data += bytes([p_len])*p_len\n return data\n\n def decode(self, data):\n pad = data[-1]\n if not (0 < pad <= self.__base):\n return False\n for i in range(len(data)-pad, len(data)):\n if data[i] != pad:\n 
return False\n data = data[:-pad]\n return True\n\n\nclass PlayGround:\n def __init__(self, fn):\n self.__pkcs = PKCS7(16)\n self.__passwd = Random.new().read(16)\n with open(fn, \"rt\") as f:\n lines = f.readlines()\n import random\n l = random.randint(0, len(lines)-1)\n import base64\n self.__secret = base64.decodebytes(lines[l].encode('ascii'))\n\n def enc(self):\n iv = Random.new().read(16)\n obj = AES.new(self.__passwd, AES.MODE_CBC, iv)\n msg = self.__pkcs.encode(self.__secret)\n cip = obj.encrypt(msg)\n return iv+cip\n\n def dec(self, input):\n obj = AES.new(self.__passwd, AES.MODE_CBC, input[0:16])\n cip = obj.decrypt(input[16:])\n # print_debug_block(\"msg: \", cip)\n return self.__pkcs.decode(cip)\n\n\ndef dec_byte(pblock, nblock, pos, known):\n ret = []\n for i in range(0, 256):\n g_block = bytearray(pblock)\n for p in range(1, pos):\n g_block[16-p] = pblock[16-p] ^ known[p-1] ^ pos\n g_block[16-pos] = pblock[16-pos] ^ i ^ pos\n g_block = bytes(g_block)\n if pg.dec(g_block+nblock):\n ret += [i]\n return ret\n\n\ndef dec_block(pblock, nblock):\n known = []\n for i in range(1, 17):\n found = dec_byte(pblock, nblock, i, known)\n if len(found) == 1:\n known += found\n elif len(found) == 2: # if there are two possibility, e.g. 
\\x02\\x01 and \\x02\\x02 (both correct padding)\n if len(dec_byte(pblock, nblock, i+1, known+[found[0]])) == 0: # try one more step with first one and if failed\n known += [found[1]] # return second quess\n else:\n known += [found[0]]\n return \"\".join([chr(k) for k in reversed(known)])\n\nif __name__ == \"__main__\":\n pg = PlayGround('data.b64')\n data = pg.enc()\n\n msg = \"\"\n for i in range(0, (len(data) // 16) - 1):\n msg += dec_block(data[i*16:(i+1)*16], data[(i+1)*16:(i+2)*16])\n print(msg)\n" }, { "alpha_fraction": 0.5156576037406921, "alphanum_fraction": 0.5459290146827698, "avg_line_length": 21.809524536132812, "blob_id": "2173f753d4bc2daf03a5015a30137edb607dd976", "content_id": "707623e1c86863bc685e09d2845c43315c090dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 52, "num_lines": 42, "path": "/set3/ch22/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\n\nimport mt19937 as rng\nimport random\nimport time\n\n\nclass PlayGround:\n def __init__(self):\n self.d = 40\n self.h = 1000\n self.__sleep(random.randint(self.d, self.h))\n self.__seed = int(time.time())\n self.__gn = rng.MT19937(self.__seed)\n\n def __sleep(self, cas):\n time.sleep(int(cas))\n\n def get_rand(self):\n self.__sleep(random.randint(self.d, self.h))\n return self.__gn.extract_number()\n\n def check(self, guess):\n return self.__seed == guess\n\n\ndef crack(rand):\n t = int(time.time())\n for i in range(2500, 0, -1):\n gn = rng.MT19937(t-i)\n if gn.extract_number() == rand:\n print(\"Seed is: \", t-i)\n print(\"Check: \", pg.check(t-i))\n return t-i\n print(\"Seek not found\")\n return 0\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n rand = pg.get_rand()\n crack(rand)\n" }, { "alpha_fraction": 0.6791666746139526, "alphanum_fraction": 0.7083333134651184, 
"avg_line_length": 29, "blob_id": "7a703ea6b0aa4d0b9e9ab5e543aafb249f695bd1", "content_id": "6c527f054e784d231c31548b4b8fdba5da7322fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 72, "num_lines": 8, "path": "/set1/ch1/test.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nsh1 = 'cat input.txt | ./sollution.py'\nsh2 = 'cat input.txt | ./sollution.py | cmp -lb output.txt | head -n 20'\n\nimport sarge\nprint(sarge.capture_stdout(sh1).stdout.text)\nprint(sarge.capture_stdout(sh2).stdout.text)\n" }, { "alpha_fraction": 0.4858199656009674, "alphanum_fraction": 0.5098643898963928, "avg_line_length": 23.57575798034668, "blob_id": "c82ccf3efd239a097d0a243ce6944f64dd250707", "content_id": "312f8d50fc05d19ecca45a45d1de73995c0bdaa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1622, "license_type": "no_license", "max_line_length": 70, "num_lines": 66, "path": "/set5/ch39/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\n\n\ndef egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n\ndef invmod(a, b):\n # https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n gcd, x, y = egcd(a, b)\n if gcd == 1:\n return (x % b)\n# invmod(17, 3120) is 2753\nassert(invmod(17, 3120) == 2753)\n\n\nclass RSA:\n def generate_keys(self):\n from Crypto.Util import number\n print(\"Finding first prime number\")\n p = number.getPrime(1024)\n print(\"Finding second prime number\")\n q = number.getPrime(1024)\n n = p * q\n et = (p-1)*(q-1)\n e = 3\n print(\"Finding private/public key\")\n while True:\n d = invmod(e, et)\n if d is not None:\n break\n else:\n e += 1\n private_key = (d, n)\n public_key = (e, 
n)\n return (private_key, public_key)\n\n def encrypt(self, msg, pub_key):\n A, n = pub_key\n m = int.from_bytes(msg, byteorder='big')\n return pow(m, A, n)\n\n def decrypt(self, msg, priv_key):\n a, n = priv_key\n msg = pow(msg, a, n)\n m = msg.to_bytes((msg.bit_length() // 8) + 1, byteorder='big')\n return m\n\n\ndef main():\n msg = b'test'\n print(\"message:\", msg)\n rsa = RSA()\n priv_key, pub_key = rsa.generate_keys()\n enc = rsa.encrypt(msg, pub_key)\n dec = rsa.decrypt(enc, priv_key)\n print(\"message after ancoding / decoding:\", dec)\n assert(msg == dec)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.43355289101600647, "alphanum_fraction": 0.49606096744537354, "avg_line_length": 26.264083862304688, "blob_id": "290c98a15d83bf2a6d1354d2ec9cd7307dc25f5d", "content_id": "97dcc5af42892659b1a537b2b7915cac811dd98a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7743, "license_type": "no_license", "max_line_length": 111, "num_lines": 284, "path": "/set3/ch23/mt19937.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom struct import pack\nfrom binascii import hexlify\n\n\ndef print_state(mt):\n for i in range(0, 624):\n print(hexlify(pack(\">I\", mt[i])).decode(\"ascii\"), end=\"\")\n\n\ndef print_state_part(mt, i):\n if not 0 <= i <= 623:\n print(\"Index out of range <0,623>, i: \", i)\n return\n print(\"mt[\", i, \"]: \", hexlify(pack(\">I\", mt[i])).decode(\"ascii\"), sep=\"\")\n\n\ndef print_hex(msg, i):\n print(msg, \"{0:x}\".format(i))\n\n\ndef _int32(i):\n return int(0xFFFFFFFF & i)\n\n\ndef _high_mask(i):\n return int(0x80000000 & i)\n\n\ndef _low_mask(i):\n return int(0x7FFFFFFF & i)\n\n\ndef extract_number(mt, index):\n y = mt[index]\n\n # print_hex(\"1:\", y)\n y ^= (y >> 11)\n # print_hex(\"2:\", y)\n y ^= (y << 7) & 0x9D2C5680\n # print_hex(\"3:\", y)\n y ^= (y << 15) & 
0xEFC60000\n # print_hex(\"4:\", y)\n y ^= (y >> 18)\n # print_hex(\"5:\", y)\n\n return _int32(y)\n\n\ndef unextract_number(rnd, index):\n y = rnd\n y ^= (y >> 18)\n # print_hex(\"4:\", y)\n y ^= (y << 15) & 0xEFC60000\n # print_hex(\"3:\", y)\n y ^= ((y << 7) & 0x9D2C5680) ^ ((y << 14) & 0x94284000) ^ ((y << 21) & 337641472) ^ ((y << 28) & 268435456)\n # print_hex(\"2:\", y)\n y ^= (y >> 11) ^ (y >> 22)\n # print_hex(\"1:\", y)\n return y\n\n\ndef init(seed):\n _mt = [0] * 624\n _mt[0] = _int32(seed)\n for i in range(1, 624):\n _mt[i] = _int32(int(1812433253) * (_mt[i-1] ^ (_mt[i-1] >> 30)) + i)\n return _mt\n\n\ndef uninit(mt):\n # from init state return seed\n if not is_init(mt):\n # print(\"state is not init state\")\n return\n\n def egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n def inverse():\n # https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n gcd, x, y = egcd(1812433253, 2**32)\n if gcd == 1:\n return (x % 2**32)\n\n pok = _int32(int(inverse())*int(mt[1]-1))\n seed = pok ^ (pok >> 30)\n return seed\n\n\ndef twist_part(mt, i):\n x = _int32(_high_mask(mt[i]) + _low_mask(mt[(i+1) % 624]))\n xA = x >> 1\n if (x % 2) != 0:\n xA = xA ^ 0x9908B0DF\n return mt[(i+397) % 624] ^ xA\n\n\ndef twist(mt):\n _mt = list(mt)\n for i in range(0, 624):\n _mt[i] = twist_part(_mt, i)\n return _mt\n\n\ndef untwist_part_compute(cur, prev, next, oposite, oposite_prev):\n xA = cur ^ oposite\n if (next % 2) != 0:\n xA = xA ^ 0x9908B0DF\n x = xA << 1\n if (next % 2) != 0:\n x += 1\n h = _int32(_high_mask(x))\n\n xA1 = prev ^ oposite_prev # if last bit of untwisted mt[i] = 0\n xA2 = xA1 ^ 0x9908B0DF # if last bit of untwisted mt[i] = 1\n x1 = xA1 << 1\n x2 = (xA2 << 1) + 1\n l1 = _int32(_low_mask(x1))\n l2 = _int32(_low_mask(x2))\n return [h + l1, h + l2]\n\n\ndef untwist_part_guess(mt, mtg, i):\n cur = mt[i]\n next = mt[(i + 1) % 624]\n prev = mt[(i - 1) % 624]\n if i > 227:\n oposite = mt[(i+397) % 
624]\n oposite_prev = mt[(i+396) % 624]\n elif i == 227:\n oposite = mt[(i+397) % 624]\n oposite_prev = mtg[(i+396) % 624]\n elif i < 227:\n oposite = mtg[(i+397) % 624]\n oposite_prev = mtg[(i+396) % 624]\n\n return untwist_part_compute(cur, prev, next, oposite, oposite_prev)\n\n\ndef untwist_part_check(mt, mtg, i):\n if twist_part(mtg, i) == mt[i]:\n return mtg\n\n guess_even, guess_odd = untwist_part_guess(mt, mtg, i)\n mte = list(mtg)\n mte[i] = guess_even\n if twist_part(mte, i) == mt[i]:\n return mte\n mto = list(mtg)\n mto[i] = guess_odd\n if twist_part(mto, i) == mt[i]:\n return mto\n return mtg\n\n\ndef debug_untwist(mt, mto, mtp, mtn, i, mt_00, mt_01, mt_10, mt_11):\n if -225 <= i <= 227:\n print(i, \":\", sep=\"\")\n print(\"old state \", \" \"*6, end=\"\")\n print_state_part(mto, i)\n print(\"new state \", \" \"*6, end=\"\")\n print_state_part(mt, i)\n print(\"guess even state \", end=\"\")\n print_state_part(mtp, i)\n print(\"guess odd state \", end=\"\")\n print_state_part(mtn, i)\n print_hex(\"twist even state \", twist_part(mtp, i))\n print_hex(\"twist odd state \", twist_part(mtn, i))\n print_hex(\"twist state \", twist_part(mt_00, i))\n print_hex(\"twist state \", twist_part(mt_01, i))\n print_hex(\"twist state \", twist_part(mt_10, i))\n print_hex(\"twist state \", twist_part(mt_11, i))\n\n\ndef untwist(mt):\n mtp = list(mt) # even\n mtn = list(mt) # odd\n mtg = list(mt) # good\n for i in range(623, -1, -1):\n guess_even, guess_odd = untwist_part_guess(mtp, mtg, i)\n mtp[i] = guess_even\n mtn[i] = guess_odd\n if i < 623:\n mt_00 = list(mtg)\n mt_00[i+1] = mtp[i+1]\n mt_00[i] = guess_even\n mt_01 = list(mtg)\n mt_01[i+1] = mtp[i+1]\n mt_01[i] = guess_odd\n mt_10 = list(mtg)\n mt_10[i+1] = mtn[i+1]\n mt_10[i] = guess_even\n mt_11 = list(mtg)\n mt_11[i+1] = mtn[i+1]\n mt_11[i] = guess_odd\n # debug_untwist_1(mt, mto, mtp, mtn, i, mt_00, mt_01, mt_10, mt_11)\n if twist_part(mt_00, i) == mt[i]:\n mtg[i+1] = mtp[i+1]\n elif twist_part(mt_01, i) == 
mt[i]:\n mtg[i+1] = mtp[i+1]\n elif twist_part(mt_10, i) == mt[i]:\n mtg[i+1] = mtn[i+1]\n elif twist_part(mt_11, i) == mt[i]:\n mtg[i+1] = mtn[i+1]\n else:\n if i != 226: # I am solving part 227 later\n print(\"unknown good for i=\", i)\n # debug_untwist(mt, mto, mtp, mtn, i, mt_00, mt_01, mt_10, mt_11)\n if i == 0:\n if twist_part(mtp, 0) == mt[0]:\n mtg[0] = guess_even\n elif twist_part(mtn, 0) == mt[0]:\n mtg[0] = guess_odd\n else:\n pass\n # print(\"unknown good\")\n # debug_untwist(mt, mto, mtp, mtn, i, mt_00, mt_01, mt_10, mt_11)\n # print_states(\"mt/mtg\", mt, mtg)\n # solve part 227\n # import pdb\n # pdb.set_trace()\n mtg = untwist_part_check(mt, mtg, 227)\n mtg = untwist_part_check(mt, mtg, 226)\n mtg = untwist_part_check(mt, mtg, 0)\n return mtg\n\n\ndef compare_states(kon, mt1, mt2):\n ret = True\n for i in range(623, kon, -1):\n if mt1[i] != mt2[i]:\n print(\"differance in part \", i)\n print_state_part(mt1, i)\n print_state_part(mt2, i)\n ret = False\n return ret\n\n\ndef print_states(nazov, st1, st2):\n for i in range(615, 624):\n print_state_part(st1, i)\n print_state_part(st2, i)\n\n\ndef is_init(mt):\n for i in range(2, 624):\n if mt[i] != _int32(int(1812433253) * (mt[i-1] ^ (mt[i-1] >> 30)) + i):\n return False\n return True\n\n\nif __name__ == \"__main__\":\n mt = [0] * 624\n mt = init(4190403025)\n mt_next = twist(mt)\n\n # untwist() test coverage\n mt_next_prev = untwist(mt_next)\n compare_states(0, mt, mt_next_prev) # mt[0] is not recoverable\n compare_states(-1, mt_next, twist(mt_next_prev))\n\n # is_init() test coverage\n print(\"is_init(mt):\", is_init(mt))\n print(\"is_init(mt_next):\", is_init(mt_next))\n print(\"is_init(mt_next_prev):\", is_init(mt_next_prev))\n print()\n\n # uninit()\n print(\"seed:\", uninit(mt))\n print(\"seed:\", uninit(mt_next))\n print(\"seed:\", uninit(mt_next_prev))\n print()\n\n # unextract_number()\n print_state_part(mt_next, 123)\n rn = extract_number(mt_next, 123)\n print_hex(\"rnd number from mt[123]:\", 
rn)\n print_hex(\"recovered state from rnd:\", unextract_number(rn, 123))\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.5827886462211609, "avg_line_length": 33, "blob_id": "28c345f741e18180bebed738c9550da068a10886", "content_id": "4a4da8aa916b346809d2a90d880e6391899c1663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 94, "num_lines": 27, "path": "/set4/ch25/test.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport sollution\nimport unittest\n\n\nclass crypt(unittest.TestCase):\n def setUp(self):\n self.pg = sollution.PlayGround()\n self.key = b\"YELLOW SUBMARINE\"\n\n def test_crypt(self):\n testData = [b\"test string\", b\"much longer test string, 12345 67890, asdfghjkl\"]\n for msg in testData:\n self.assertEqual(msg,\n self.pg.crypt(self.pg.crypt(msg, self.key), self.key),\n \"[ERROR] in fnc crypt for input: {}\".format(msg))\n\n def test_edit(self):\n msg = b\"aaa bbb ccc\"\n enc = self.pg.crypt(msg, self.key)\n edt = self.pg.edit(enc, self.key, 4, b\"xxx\")\n dec = self.pg.crypt(edt, self.key)\n self.assertEqual(dec, b\"aaa xxx ccc\", \"[ERROR] in fnc edit for input: {}\".format(msg))\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n" }, { "alpha_fraction": 0.4816513657569885, "alphanum_fraction": 0.747706413269043, "avg_line_length": 33.880001068115234, "blob_id": "6b4ee48866cff02f0aa504d5be4c534c37d68e85", "content_id": "c5922de080f5bffeb59d8d0f5fb7df685f783e50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 872, "license_type": "no_license", "max_line_length": 390, "num_lines": 25, "path": "/set5/ch33/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport random\n\np = 
0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\ng = 2\n\na = random.randint(0, p)\nprint('private key for a:', a)\n\nA = pow(g, a, p) # A = (g**a) % p\nprint('public key for a:', A)\n\nb = random.randint(0, p)\nprint('private key for b:', b)\n\nB = pow(g, b, p) # B = (g**b) % p\nprint('public key for b:', B)\n\nsa = pow(B, a, p) # sa = (B**a) % p\nprint('shared secreto for a:', sa)\n\nsb = pow(A, b, p) # sb = (A**b) % p\nprint('shared secreto for b:', sb)\n\nassert(sa == sb) # secret must be the same\n" }, { "alpha_fraction": 0.5443786978721619, "alphanum_fraction": 0.5680473446846008, "avg_line_length": 15.899999618530273, "blob_id": "e8b79b232dd1759d5cf57cffc8c702a7ab5337c1", "content_id": "aedb6e6c392cc6dcff6c853d69f9040d5c95cb64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 169, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/set3/ch17/conv.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f tmp.txt\nfor line in $(cat data.b64)\ndo\n echo \"$line\" | base64 -d >> tmp.txt\n echo >> tmp.txt\ndone\n\ncat tmp.txt | sort | uniq > data.txt\n" }, { "alpha_fraction": 0.6037735939025879, "alphanum_fraction": 0.6603773832321167, "avg_line_length": 16.66666603088379, "blob_id": "30363c89b1a036dae0e02fdbb36941327b523c73", "content_id": "c5ef9c3b4c8315075a8f6a551ea5f6a75d1575bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 53, "license_type": "no_license", "max_line_length": 39, "num_lines": 3, "path": "/set2/ch09/run_c.sh", "repo_name": 
"PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n./pkcs7.exe 20 \"YELLOW SUBMARINE\" | xxd\n" }, { "alpha_fraction": 0.7458563446998596, "alphanum_fraction": 0.7569060921669006, "avg_line_length": 89.5, "blob_id": "7c22554f1cc1633c2e5a7142871116387c91479a", "content_id": "011405de62a31bafc19726dbb220bc5870022bc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 181, "license_type": "no_license", "max_line_length": 175, "num_lines": 2, "path": "/set1/ch3/Makefile", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "all:\n\tgcc -std=c11 -Wall -Wextra -pedantic -Werror -Wshadow -Wstrict-overflow -fno-strict-aliasing -march=native -o crypto_xor_find_one_byte_key.exe crypto_xor_find_one_byte_key.c\n" }, { "alpha_fraction": 0.48345035314559937, "alphanum_fraction": 0.60280841588974, "avg_line_length": 37.346153259277344, "blob_id": "078797d23569d17e33082a1bab9d0ace7090df1f", "content_id": "1c9efb9586b8bb6c3e1a8972c1211a04945473ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 147, "num_lines": 26, "path": "/set4/ch32/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport timeit\n\n\ndef main():\n print('http://localhost:9000/test?file=foo&signature=9fc254126c2b1b7f106abacae0cb77e73411fad7')\n # f = urllib.request.urlopen('http://localhost:9000/test?file=foo&signature=9fc254126c2b1b7f106abacae0cb77e73411fad7')\n # print(f.read())\n # s = \"import urllib.request\\nurllib.request.urlopen('http://localhost:9000/test?file=foo&signature=9fc254126c2b1b7f106abacae0cb77e73411fad7')\"\n\n sig_ok = \"9fc254126c2b1b7f106abacae0cb77e73411fad7\"\n sig = \"\"\n for j in range(40):\n tt = {}\n for i in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']:\n s = \"import 
urllib.request\\nurllib.request.urlopen('http://localhost:9000/test?file=foo&signature=\" + sig + i + \"')\"\n t = timeit.Timer(s)\n tt[i] = t.timeit(number=10)\n sig += max(tt, key=tt.get)\n print(j, sig)\n\n if sig == sig_ok:\n print(\"Succes\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4197952151298523, "alphanum_fraction": 0.430521696805954, "avg_line_length": 27.095890045166016, "blob_id": "ea9881a75afaca2d8de0e10c3bebb9d2546e0a1e", "content_id": "e386fd1d0caa14a43d171eed817e6cca1506e727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2051, "license_type": "no_license", "max_line_length": 78, "num_lines": 73, "path": "/set1/ch3/crypto_xor_find_one_byte_key.c", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#include <stdlib.h> //exit\n#include <ctype.h> //isalnum\n#include <stdint.h>\n#include <stdio.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <sys/mman.h>\n\nstruct stat sb;\nuint8_t *p;\n\nvoid read_into_memory(char *fname) {\n int fd = open (fname, O_RDONLY);\n if (fd == -1) {\n perror (\"open\");\n exit(EXIT_FAILURE);\n }\n if (fstat (fd, &sb) == -1) {\n perror (\"fstat\");\n exit(EXIT_FAILURE);\n }\n if (!S_ISREG (sb.st_mode)) {\n fprintf (stderr, \"%s is not a file\\n\", fname);\n exit(EXIT_FAILURE);\n }\n p = mmap (0, sb.st_size, PROT_READ, MAP_SHARED, fd, 0);\n if (p == MAP_FAILED) {\n perror (\"mmap\");\n exit(EXIT_FAILURE);\n }\n if (close (fd) == -1) {\n perror (\"close\");\n exit(EXIT_FAILURE);\n }\n}\n\nvoid free_memory() {\n if (munmap (p, sb.st_size) == -1) {\n perror (\"munmap\");\n exit(EXIT_FAILURE);\n }\n}\n\nint main (int argc, char *argv[]){\n if (argc < 2) {\n fprintf (stderr, \"usage: %s <file>\\n\", argv[0]);\n exit(EXIT_FAILURE);\n }\n\n read_into_memory(argv[1]); //fill global pointer (array) p\n\n uint8_t key = 0, best_key, dec;\n int score, best_score = 0;\n\n for (int i = 
0; i < 256; i++){\n score = 0, key++;\n for (off_t len = 0; len < sb.st_size; len++) {\n dec = p[len] ^ key;\n if (dec == ' ') score += 5;\n else if (isalnum(dec)) score += 1;\n }\n if (score > best_score)\n best_score = score, best_key = key;\n }\n printf(\"%i;%c;\", best_score, best_key);\n for (off_t len = 0; len < sb.st_size; len++) putchar(p[len]^best_key);\n putchar('\\n');\n\n free_memory();\n exit(EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.5390334725379944, "alphanum_fraction": 0.565055787563324, "avg_line_length": 31.93877601623535, "blob_id": "95e749779b64d0c38d5e25ed4d71319c7d8cccda", "content_id": "5b1fef351b760ca6206a06c5a4b1c667d035745e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 115, "num_lines": 49, "path": "/set3/ch24/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport struct # pack\nimport random\nimport sys\nimport string\nimport numpy as np\n\n\nclass PlayGround:\n def __init__(self):\n self.__key = random.getrandbits(16)\n self.__msg = ''.join(random.choice(string.ascii_uppercase) for _ in range(random.randint(4, 18))) + \"A\"*14\n\n # Write the function that does this for MT19937 using a 16-bit seed\n def crypt(self, data, key):\n self.rn = np.random.RandomState(key)\n output = \"\"\n for i, c in enumerate(data):\n if i % 4 == 0:\n ks = struct.unpack(\"<BBBB\", self.rn.bytes(4))\n j = 3 - (i % 4)\n output += chr(ks[j] ^ ord(c))\n return output\n\n def test_crypt(self):\n for _ in range(10):\n m = ''.join(random.choice(string.ascii_uppercase) for _ in range(random.randint(4, 18))) + \"A\"*14\n k = random.getrandbits(16)\n assert m == self.crypt(self.crypt(m, k), k), \"Crypto function error for msg {} and key {}\".format(m, k)\n\n def ciphertext(self):\n return self.crypt(self.__msg, self.__key)\n\n def check_key(self, 
k):\n sys.exit(\"Key is correct\" if k == self.__key else \"Key is incorrect\")\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n pg.test_crypt() # Verify that you can encrypt and decrypt properly\n\n # From the ciphertext, recover the \"key\" (the 16 bit seed)\n enc = pg.ciphertext()\n t = \"X\"*(len(enc)-14) + \"A\"*14\n for k in range(2**16):\n if pg.crypt(t, k)[-14:] == enc[-14:]:\n print(\"key:\", k)\n pg.check_key(k)\n" }, { "alpha_fraction": 0.5249637961387634, "alphanum_fraction": 0.6248190999031067, "avg_line_length": 34.43589782714844, "blob_id": "2711b8cdcfc0fe228374843b9aece0862ed85aad", "content_id": "bfcb764ccd908a892e8c136245ddf2a9aedef1ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2764, "license_type": "no_license", "max_line_length": 390, "num_lines": 78, "path": "/set5/ch36/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport random\nimport hashlib\nimport hmac\n\np = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\ng = 2\nk = 3\n\n\nclass Server:\n def register(self, email, password, salt):\n self.email = email.encode('utf-8') if type(email) == str else email\n self.salt = salt.encode('utf-8') if type(salt) == str else salt\n passwd = password.encode('utf-8') if type(password) == str else password\n xH = hashlib.sha256(self.salt + passwd).hexdigest()\n x = int(xH, 16)\n self.verifier = pow(g, x, p)\n\n def login(self, email, A):\n self.A = A\n self.__b = random.randint(1, p)\n self.B = k*self.verifier+pow(g, self.__b, p)\n\n def 
compute_key(self):\n u = hashlib.sha256(str(self.A+self.B).encode('utf-8')).hexdigest().encode('utf-8')\n u = int.from_bytes(u, byteorder='big')\n s = self.A * pow(self.verifier, u, p)\n s = pow(s, self.__b, p)\n self.__K = hashlib.sha256(str(s).encode('utf-8')).hexdigest().encode('utf-8')\n\n def check_hmac(self, h2):\n h1 = hmac.new(self.__K, self.salt, hashlib.sha256).digest()\n if h1 == h2:\n print('HMAC is the same, login OK')\n else:\n print('HMAC is different, login FAILED')\n\n\nclass Client:\n def __init__(self):\n self.__a = random.randint(1, p)\n self.A = pow(g, self.__a, p)\n self.email = b'[email protected]'\n self.__pass = b'passwd'\n self.salt = b'saltuseratmaildotcom'\n\n def get_pass(self):\n return self.__pass\n\n def compute_key(self, B):\n self.B = B\n u = hashlib.sha256(str(self.A+self.B).encode('utf-8')).hexdigest().encode('utf-8')\n u = int.from_bytes(u, byteorder='big')\n xH = hashlib.sha256(self.salt + self.__pass).hexdigest()\n x = int(xH, 16)\n s = self.B - k*pow(g, x, p)\n s = pow(s, self.__a+u*x, p)\n self.__K = hashlib.sha256(str(s).encode('utf-8')).hexdigest().encode('utf-8')\n\n def compute_hmac(self):\n h = hmac.new(self.__K, self.salt, hashlib.sha256).digest()\n return h\n\n\ndef main():\n server = Server()\n client = Client()\n server.register(client.email, client.get_pass(), client.salt)\n server.login(client.email, client.A)\n client.compute_key(server.B)\n server.compute_key()\n h = client.compute_hmac()\n server.check_hmac(h)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7378752827644348, "alphanum_fraction": 0.7505773901939392, "avg_line_length": 49.94117736816406, "blob_id": "c54d05f1983eb4c1d279f1d11d78f25e08275d5c", "content_id": "4c546c7fad5c07a7295cfacb48567f463b54aec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 866, "license_type": "no_license", "max_line_length": 113, "num_lines": 17, "path": "/set1/ch3/README.md", "repo_name": 
"PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "implementation in C:\nname: `crypto_xor_find_one_byte_key`\ndescription: find the best key (byte) for which the xored output has the biggest score (most likely english text)\ninput parameter: file in binary mode\noutput: one text line in form: score;key;decoded text\nscore: integer (for text file: space:5pt, 5 most common chars: 3pt, printable chars: 1pt\nkey: the best byte in ASCII form (see view output hint for printing non ASCII value via xxd)\ndecoded text: ASCII output decoded (xored) by key byte\nhint: use xxd to work with input/output binary datas\n# convert binary file to text file in hex form\nxxd -u -p data.bin | tr -d \"\\n\"\n# binary file display 10 bytes in binary representation, each line contains 5 bytes\nxxd -l 10 -g 1 -c 5 -b data.bin\n# convert text hex file into binary\nxxd -p -r input.txt > input.bin\n#viev output:\n`./crypto_xor_byte file.bin | xxd`\n" }, { "alpha_fraction": 0.5701388716697693, "alphanum_fraction": 0.6013888716697693, "avg_line_length": 25.18181800842285, "blob_id": "4591dfb955ac079802deb5adb7f70815cd1037f4", "content_id": "5727d67dfb7b6af208177b545907bb06859dd6e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 99, "num_lines": 55, "path": "/set4/ch31/server.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport urllib\nimport hmac\nimport hashlib\n\nhostName = \"localhost\"\nhostPort = 9000\n\n\nclass PlayGround:\n def __init__(self):\n self.__key = b\"key\"\n self.__sleep = 0.05\n\n def check(self, msg, mac_prov):\n return self.__compare(mac_prov, hmac.new(self.__key, msg, hashlib.sha1).hexdigest())\n\n def __compare(self, a, b):\n for i in range(40):\n if a[i] != b[i]:\n return False\n time.sleep(self.__sleep)\n return 
True\n\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n parsed_path = urllib.parse.urlparse(self.path)\n arg = urllib.parse.parse_qs(parsed_path[4])\n fn = arg['file'][0]\n sig = arg['signature'][0]\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n if pg.check(fn.encode('ascii'), sig):\n self.wfile.write(bytes(\"OK\", \"utf-8\"))\n else:\n self.wfile.write(bytes(\"invalid\", \"utf-8\"))\n\n\ndef main():\n myServer = HTTPServer((hostName, hostPort), MyServer)\n print('http://localhost:9000/test?file=foo&signature=9fc254126c2b1b7f106abacae0cb77e73411fad7')\n try:\n myServer.serve_forever()\n except KeyboardInterrupt:\n pass\n myServer.server_close()\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n main()\n" }, { "alpha_fraction": 0.6639004349708557, "alphanum_fraction": 0.6680498123168945, "avg_line_length": 20.582090377807617, "blob_id": "6dbe9bbfc6a8812f857dbc09e21c0a3859ff659d", "content_id": "9378c7ad026a3168aa3050b1f00d3058d656f1ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1446, "license_type": "no_license", "max_line_length": 84, "num_lines": 67, "path": "/set5/ch37/pysrp.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n\nimport srp\n\n# The salt and verifier returned from srp.create_salted_verification_key() should be\n# stored on the server.\nsalt, vkey = srp.create_salted_verification_key('testuser', 'testpassword')\n\n\nclass AuthenticationFailed (Exception):\n pass\n\n\n# ~~~ Begin Authentication ~~~\n\nusr = srp.User('testuser', 'testpassword')\nuname, A = usr.start_authentication()\n\n# The authentication process can fail at each step from this\n# point on. 
To comply with the SRP protocol, the authentication\n# process should be aborted on the first failure.\n\n# Client => Server: username, A\nsvr = srp.Verifier(uname, salt, vkey, A)\ns, B = svr.get_challenge()\n\nif s is None or B is None:\n raise AuthenticationFailed()\n\n# Server => Client: s, B\nM = usr.process_challenge(s, B)\n\nif M is None:\n raise AuthenticationFailed()\n\n# Client => Server: M\nHAMK = svr.verify_session(M)\n\nif HAMK is None:\n raise AuthenticationFailed()\n\n# Server => Client: HAMK\nusr.verify_session(HAMK)\n\n# At this point the authentication process is complete.\n\nassert usr.authenticated()\nassert svr.authenticated()\n# malicious client without password\n\n\ndef long_to_bytes(n):\n l = list()\n x = 0\n off = 0\n while x != n:\n b = (n >> off) & 0xFF\n l.append(chr(b))\n x = x | (b << off)\n off += 8\n l.reverse()\n return ''.join(l)\n\n\nA = long_to_bytes(0)\nsvr = srp.Verifier(uname, salt, vkey, A)\n# s, B = svr.get_challenge()\n" }, { "alpha_fraction": 0.47834745049476624, "alphanum_fraction": 0.6182180047035217, "avg_line_length": 46.27058792114258, "blob_id": "dd8ff8d3615fbdb45350ee4fe238f5800945e1ed", "content_id": "2943c259f7282138624c45aed62b3172411d1c07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4018, "license_type": "no_license", "max_line_length": 545, "num_lines": 85, "path": "/set6/ch45/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import DSA\nfrom Crypto.Util import number\nfrom Crypto.Random import random\ndebug = True\np = 
number.bytes_to_long(b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x89\\xe1\\x85\\x52\\x18\\xa0\\xe7\\xda\\xc3\\x81\\x36\\xff\\xaf\\xa7\\x2e\\xda\\x78\\x59\\xf2\\x17\\x1e\\x25\\xe6\\x5e\\xac\\x69\\x8c\\x17\\x02\\x57\\x8b\\x07\\xdc\\x2a\\x10\\x76\\xda\\x24\\x1c\\x76\\xc6\\x2d\\x37\\x4d\\x83\\x89\\xea\\x5a\\xef\\xfd\\x32\\x26\\xa0\\x53\\x0c\\xc5\\x65\\xf3\\xbf\\x6b\\x50\\x92\\x91\\x39\\xeb\\xea\\xc0\\x4f\\x48\\xc3\\xc8\\x4a\\xfb\\x79\\x6d\\x61\\xe5\\xa4\\xf9\\xa8\\xfd\\xa8\\x12\\xab\\x59\\x49\\x42\\x32\\xc7\\xd2\\xb4\\xde\\xb5\\x0a\\xa1\\x8e\\xe9\\xe1\\x32\\xbf\\xa8\\x5a\\xc4\\x37\\x4d\\x7f\\x90\\x91\\xab\\xc3\\xd0\\x15\\xef\\xc8\\x71\\xa5\\x84\\x47\\x1b\\xb1')\nq = number.bytes_to_long(b'\\xf4\\xf4\\x7f\\x05\\x79\\x4b\\x25\\x61\\x74\\xbb\\xa6\\xe9\\xb3\\x96\\xa7\\x70\\x7e\\x56\\x3c\\x5b')\ng = number.bytes_to_long(b'\\x59\\x58\\xc9\\xd3\\x89\\x8b\\x22\\x4b\\x12\\x67\\x2c\\x0b\\x98\\xe0\\x6c\\x60\\xdf\\x92\\x3c\\xb8\\xbc\\x99\\x9d\\x11\\x94\\x58\\xfe\\xf5\\x38\\xb8\\xfa\\x40\\x46\\xc8\\xdb\\x53\\x03\\x9d\\xb6\\x20\\xc0\\x94\\xc9\\xfa\\x07\\x7e\\xf3\\x89\\xb5\\x32\\x2a\\x55\\x99\\x46\\xa7\\x19\\x03\\xf9\\x90\\xf1\\xf7\\xe0\\xe0\\x25\\xe2\\xd7\\xf7\\xcf\\x49\\x4a\\xff\\x1a\\x04\\x70\\xf5\\xb6\\x4c\\x36\\xb6\\x25\\xa0\\x97\\xf1\\x65\\x1f\\xe7\\x75\\x32\\x35\\x56\\xfe\\x00\\xb3\\x60\\x8c\\x88\\x78\\x92\\x87\\x84\\x80\\xe9\\x90\\x41\\xbe\\x60\\x1a\\x62\\x16\\x6c\\xa6\\x89\\x4b\\xdd\\x41\\xa7\\x05\\x4e\\xc8\\x9f\\x75\\x6b\\xa9\\xfc\\x95\\x30\\x22\\x91')\n\ng = p + 1\n\n\nclass MyDSASigner:\n def __init__(self):\n self.__x = 42\n self.y = pow(g, self.__x, p)\n\n def sign(self, h, priv_key=None, nonce=None):\n x = self.__x if priv_key is None else priv_key\n\n while True:\n k = random.StrongRandom().randint(1, q-1) if nonce is None else nonce\n r = pow(g, k, p) % q\n s2 = number.inverse(k, q) * (number.bytes_to_long(h) + x * r)\n s = pow(k, q-2, q) * (number.bytes_to_long(h) + x * r)\n assert(s2 == s)\n s = s % q\n if r != 0 and s != 0:\n self.k = k\n return (r, s)\n\n def 
verify(self, sig, h, pub_key=None):\n y = self.y if pub_key is None else pub_key\n r, s = sig\n if not 0 < r < q:\n return False\n if not 0 < s < q:\n return False\n w = number.inverse(s, q)\n u1 = (number.bytes_to_long(h) * w) % q\n u2 = (r * w) % q\n v = ((pow(g, u1, p) * pow(y, u2, p)) % p) % q\n return v == r\n\n\ndef test_MyDSASigner():\n msg = b\"Hello\"\n h = SHA.new(msg).digest()\n\n myDSA = MyDSASigner()\n sig_my = myDSA.sign(h)\n sig_my2 = myDSA.sign(h, 42, myDSA.k)\n assert sig_my == sig_my2\n print('My signature :', sig_my)\n\n key = DSA.construct((myDSA.y, g, p, q, 42))\n sig_py = key.sign(h, myDSA.k)\n print('PyCrypto signature:', sig_py)\n\n assert(sig_my == sig_py)\n print(\"Verification my:\", myDSA.verify(sig_my, h))\n print(\"Verification py:\", key.verify(h, sig_py))\n assert myDSA.verify(sig_my, h)\n assert key.verify(h, sig_py)\n\n\ndef main():\n # if g=0, then r=0, s=h/k, verification will always pass, because g**x = 0 forall x\n print('=== Forging signature ===')\n msg = b\"\"\"Hello, world\"\"\"\n hb = SHA.new(msg).digest()\n y = number.bytes_to_long(b'\\x08\\x4a\\xd4\\x71\\x9d\\x04\\x44\\x95\\x49\\x6a\\x32\\x01\\xc8\\xff\\x48\\x4f\\xeb\\x45\\xb9\\x62\\xe7\\x30\\x2e\\x56\\xa3\\x92\\xae\\xe4\\xab\\xab\\x3e\\x4b\\xde\\xbf\\x29\\x55\\xb4\\x73\\x60\\x12\\xf2\\x1a\\x08\\x08\\x40\\x56\\xb1\\x9b\\xcd\\x7f\\xee\\x56\\x04\\x8e\\x00\\x4e\\x44\\x98\\x4e\\x2f\\x41\\x17\\x88\\xef\\xdc\\x83\\x7a\\x0d\\x2e\\x5a\\xbb\\x7b\\x55\\x50\\x39\\xfd\\x24\\x3a\\xc0\\x1f\\x0f\\xb2\\xed\\x1d\\xec\\x56\\x82\\x80\\xce\\x67\\x8e\\x93\\x18\\x68\\xd2\\x3e\\xb0\\x95\\xfd\\xe9\\xd3\\x77\\x91\\x91\\xb8\\xc0\\x29\\x9d\\x6e\\x07\\xbb\\xb2\\x83\\xe6\\x63\\x34\\x51\\xe5\\x35\\xc4\\x55\\x13\\xb2\\xd3\\x3c\\x99\\xea\\x17')\n z = random.StrongRandom().randint(1, q-1)\n r = pow(y, z, p) % q\n s = (r * number.inverse(z, q)) % q\n print('r:', r)\n print('s:', s)\n myDSA = MyDSASigner()\n print(\"Verification:\", myDSA.verify((r, s), hb, y))\n\n\nif __name__ == \"__main__\":\n 
main()\n" }, { "alpha_fraction": 0.5035461187362671, "alphanum_fraction": 0.5177304744720459, "avg_line_length": 31.899999618530273, "blob_id": "be7a634189a10e9bc9af444936c3f2879342f080", "content_id": "cdbb08d43692b64e8cdceae48c2cae8eeb5bf7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 987, "license_type": "no_license", "max_line_length": 109, "num_lines": 30, "path": "/set2/ch09/pkcs7.c", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#include <string.h> //atoi, strlen\n#include <stddef.h> //size_t\n#include <stdio.h> //printf\n#include <stdlib.h> //exit\n#include <stdbool.h> //bool\n\nbool debug = true;\n\nint main (int argc, char *argv[]){\n if (argc < 3) {\n fprintf (stderr, \"usage: %s N \\\"string\\\"\\n\", argv[0]);\n exit(EXIT_FAILURE);\n }\n size_t n = atoi(argv[1]);\n size_t len = strlen(argv[2]);\n if ( (n<len) || (n>256) ) {\n fprintf(stderr, \"N must be from interval <%zu,256>\", len);\n exit(EXIT_FAILURE);\n }\n\n if (debug) fprintf(stderr, \"Padding length: %zu\\nString length: %zu\\nString: %s\\n\", n, len, argv[2]);\n char diff = n - len;\n if (debug) fprintf(stderr, \"Number of padding bytes: %i\\n\", diff);\n char *nstr;\n nstr = (char *)calloc(n+1,sizeof(char));\n strcpy(nstr, argv[2]);\n memset(nstr+len, diff, diff*sizeof(char));\n printf(\"%s\", nstr);\n return 0;\n}\n" }, { "alpha_fraction": 0.5561232566833496, "alphanum_fraction": 0.5923517346382141, "avg_line_length": 35.49152374267578, "blob_id": "6f30b03f5c8c02065646d2f35578657347933b4a", "content_id": "95a8c7392cca0d9b882abcaa94b80a1efcfae230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6459, "license_type": "no_license", "max_line_length": 122, "num_lines": 177, "path": "/set6/ch42/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom 
Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Util import number\nimport binascii\ndebug = True\ndebug = False\n\n\ndef getKey():\n import os.path\n if not os.path.isfile('key.pem'):\n if debug:\n print('generating key')\n key = RSA.generate(3072, e=3)\n ex_key = key.exportKey()\n with open('key.pem', 'wb') as of:\n of.write(ex_key)\n else:\n if debug:\n print('loading key from file')\n with open('key.pem', 'rb') as f:\n key = RSA.importKey(f.read())\n return key\n\n\ndef get_signature_padding_len(signature, pub_key):\n dec = pub_key.encrypt(signature, '')[0]\n for pos, z in enumerate(dec[1:]):\n if z == 0x00:\n return pos\n if z != 0xff:\n return None\n\n\ndef debug_valid_signature(valid_signature, pub_key):\n # check verification - encrypt by publick key to get pagged hash\n dec = pub_key.encrypt(valid_signature, '')[0]\n print(\"Length of signature:\", len(dec))\n print(\"Length of n:\", len(number.long_to_bytes(pub_key.n)))\n print(binascii.hexlify(dec))\n # parse signature\n pad_len = get_signature_padding_len(valid_signature, pub_key)\n der_enc = binascii.hexlify(dec[pad_len+2:pad_len+17])\n hash_en = binascii.hexlify(dec[pad_len+17:])\n # print info\n print('=== ucrypted signature === ')\n print('documentation: rfc 3447 section 9.2. 
fnct name EMSA-PKCS1-v1_5-ENCODE(hash, len)')\n print('length of whole signature:', len(dec))\n print('bt (len: 1 ):', dec[0])\n print('ff padding (len:', pad_len, '): 0xff')\n print('zero byte (len: 1 ):', dec[pad_len+1])\n print('DER enc (len:', len(der_enc) // 2, '):', der_enc)\n print('hash (len:', len(hash_en) // 2, '):', hash_en)\n # print('=== Raw Data ===')\n # print(binascii.hexlify(dec))\n\n\ndef debug_msg_hash(message, h):\n print(\"msg:\", message)\n print(\"SHA 1 hash of msg:\", h.hexdigest())\n\n\ndef check_signature_correct(message, valid_signature, pub_key):\n dec = pub_key.encrypt(valid_signature, '')[0]\n # check block type\n if dec[0] != 1:\n return False\n # check padding 0xff\n pad_len = get_signature_padding_len(valid_signature, pub_key)\n # check DER encoding for SHA 1\n if dec[pad_len+2:pad_len+17] != b'0!0\\t\\x06\\x05+\\x0e\\x03\\x02\\x1a\\x05\\x00\\x04\\x14':\n return False\n # check SHA 1 hash\n h_given = binascii.hexlify(dec[pad_len+17:]).decode('utf-8')\n h_computed = SHA.new(message).hexdigest()\n if h_given != h_computed:\n return False\n return True\n\n\ndef check_signature_bug(message, valid_signature, pub_key):\n assert(pub_key.e == 3)\n # decrypt verification\n enc = number.bytes_to_long(valid_signature)\n dec = number.long_to_bytes(pow(enc, 3, pub_key.n))\n # check signature\n check_str = b'\\xff\\x00' + \\\n b'0!0\\t\\x06\\x05+\\x0e\\x03\\x02\\x1a\\x05\\x00\\x04\\x14' + \\\n binascii.unhexlify(SHA.new(message).hexdigest().encode('utf-8'))\n return (check_str in dec)\n\n\ndef dpl(n, x):\n print(n, binascii.hexlify(number.long_to_bytes(x)))\n\n\ndef create_forge_signature(msg, pub_key):\n # based on paper:\n # \"Bleichenbacher's RSA-Signature Forgery\" - Solving Prof. 
Stamp's Challenge Number 21\n # Steffen Rumpf\n assert pub_key.e == 3, \"Works only for public key 3\"\n # input parameters\n l = 384 # length of the signature EB in bytes\n nff = 50\n Data = b'\\x30\\x21\\x30\\x09\\x06\\x05\\x2b\\x0e\\x03\\x02\\x1a\\x05\\x00\\x04\\x14' # DER code\n Data += b'\\x92\\x5a\\x89\\xb4\\x3f\\x3c\\xaf\\xf5\\x07\\xdb\\x0a\\x86\\xd2\\x0a\\x24\\x28\\x00\\x7f\\x10\\xb6' # hash of string 'hi mom'\n # derived values from input parameters\n D = number.bytes_to_long(Data)\n Dlen = len(Data)\n de = (l-2-nff)*8 # position (in bytes) where the padding block (\\xff) ens\n ds = (l-3-nff-Dlen)*8 # position (in bytes) where the data block starts\n n = l * 8 # length of the signature EB in bites\n x = n - 15\n assert(x % 3 == 0)\n assert(ds > 2*x//3)\n N = 2**(de-ds) - D\n assert (N % 3 == 0)\n # compute G\n A = 2**(x//3)\n B = N*2**(ds-2*x//3)//3\n LS7 = (A-B)**3 # left side of equation 7 on page 8 from paper\n RS7 = A**3 - 3*A*A*B + 3*A*B*B - B**3 # right side of equation 7 on page 8 from paper\n assert(RS7 == LS7)\n G = 3*A*B*B-B**3 # computed garbage\n # dpl('G:', G)\n EB4 = (2**x)-(2**de)+(D*2**ds)+G # EB based on equation 4 on page 6\n # problem if G is too big and is mixed with data\n # pok = (2**x)-(2**de)\n # pok2 = (D*2**ds)\n # pok3 = pok + pok2\n # dpl('pok', pok)\n # dpl('pok2', pok2)\n # dpl('pok3', pok3)\n # dpl('pok3+G', pok3+G)\n # print('len(G):', len(number.long_to_bytes(G)))\n # print('n-Dlen-nff:', l - Dlen - nff)\n assert l - Dlen - nff > len(number.long_to_bytes(G))\n EB5 = 2**x - N*(2**ds) + G # EB based on equation 5.3 on page 6\n assert(EB5 == EB4)\n assert(EB5**3 == RS7**3)\n assert(pow(EB5, 3, pub_key.n) == pow(RS7, 3, pub_key.n))\n assert(EB5 == RS7)\n S = A - B # encrypted signature\n assert(EB4 == S**3)\n dpl('dec(S):', S**3)\n if debug:\n dpl('Sign. 
:', S)\n return number.long_to_bytes(S)\n\n\ndef main():\n message = b'hi mom'\n h = SHA.new(message)\n if debug:\n debug_msg_hash(message, h)\n key = getKey()\n pub_key = key.publickey()\n signer = PKCS1_v1_5.new(key)\n valid_signature = signer.sign(h)\n debug_valid_signature(valid_signature, pub_key)\n print(\"=== Checking signatures ===\")\n print(\"Checking valid signature with valid function:\", check_signature_correct(message, valid_signature, pub_key))\n print(\"Checking valid signature with invalid function:\", check_signature_bug(message, valid_signature, pub_key))\n print(\"=== Creating forge signature ===\")\n forge_signature = create_forge_signature(message, pub_key)\n if debug:\n debug_valid_signature(forge_signature, pub_key)\n print(\"=== Checking forge signature ===\")\n print(\"Checking forged signature with valid function:\", check_signature_correct(message, forge_signature, pub_key))\n print(\"Checking forged signature with invalid function:\", check_signature_bug(message, forge_signature, pub_key))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6188604831695557, "alphanum_fraction": 0.6463654041290283, "avg_line_length": 32.93333435058594, "blob_id": "e4b0e6d0bb24b516d2fa8822d92910b4c4413a18", "content_id": "2dde44c4dca2b1d4b051520ba4f76af1045805fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/set3/ch18/pycrypto.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\n\nif __name__ == \"__main__\":\n ctr = Counter.new(64, prefix=b'\\x00'*8, initial_value=0, little_endian=True)\n obj = AES.new(b\"YELLOW SUBMARINE\", AES.MODE_CTR, counter=ctr)\n\n with open(\"data.b64\", \"rt\") as f:\n lines = f.readlines()\n import base64\n enc = 
base64.decodebytes(lines[0].encode('ascii'))\n dec = obj.encrypt(enc)\n print(\"\".join([chr(d) for d in dec]))\n" }, { "alpha_fraction": 0.5234187841415405, "alphanum_fraction": 0.608547031879425, "avg_line_length": 42.656715393066406, "blob_id": "ec6a2ab5d938fcef1b417a017e162ed7e6b44c8e", "content_id": "bde372d97bce0a83a75a53ef3d4805510cb93520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5850, "license_type": "no_license", "max_line_length": 123, "num_lines": 134, "path": "/set3/ch23/test.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport mt19937\nimport unittest\nimport numpy as np\nimport struct\nimport random\n\n\nclass mt19937_random_numbers_small(unittest.TestCase):\n \"\"\"My implementation of mt19937 must return same numbers as numpy implementation of mt19937\n test for one number, seed is 42\"\"\"\n\n def setUp(self):\n self.rn = np.random.RandomState(42)\n\n self.mt = [0] * 624\n self.mt = mt19937.init(42)\n self.mt = mt19937.twist(self.mt)\n\n def test_random_numbers_small(self):\n np_r = struct.unpack(\"<L\", self.rn.bytes(4))[0]\n mt_r = mt19937.extract_number(self.mt, 0)\n self.assertEqual(np_r, mt_r, \"[ERROR]: seed=42 twist=1 i=0\")\n\n\nclass mt19937_random_numbers_big(unittest.TestCase):\n \"\"\"My implementation of mt19937 must return same numbers as numpy implementation of mt19937\n test for more numbers (20*624), seed is random\"\"\"\n def setUp(self):\n self.seed = random.getrandbits(32)\n self.rounds = 20\n\n self.rn = np.random.RandomState(self.seed)\n\n self.mt = [0] * 624\n self.mt = mt19937.init(self.seed)\n\n def test_random_numbers_big(self):\n for i in range(624*self.rounds):\n if i % 624 == 0:\n self.mt = mt19937.twist(self.mt)\n np_r = struct.unpack(\"<L\", self.rn.bytes(4))[0]\n mt_r = mt19937.extract_number(self.mt, i % 624)\n self.assertEqual(np_r, mt_r, \"[ERROR]: 
seed={}, twist={}, i={}\".format(self.seed, i // 624, i))\n\n\nclass mt19937_untwist(unittest.TestCase):\n \"\"\"Test my implementation of untwist function\"\"\"\n def do_test_state(self, mt):\n # States are the same except first part, which can not be recover\n self.assertListEqual(mt[1:],\n mt19937.untwist(mt19937.twist(mt))[1:],\n \"test with state mt == untwist(twist(mt)), mt: {}\".format(mt)) # mt[0] is not recoverable\n # if you twist untwisted state, also first part must be twisted correctly\n self.assertListEqual(mt19937.twist(mt),\n mt19937.twist(mt19937.untwist(mt19937.twist(mt))),\n \"test with state twist(mt) == twist(untwist(twist(mt))), mt: {}\".format(mt))\n\n def do_test_seed(self, seed):\n mt = mt19937.init(seed)\n # States are the same except first part, which can not be recover\n self.assertListEqual(mt[1:],\n mt19937.untwist(mt19937.twist(mt))[1:],\n \"test with seed: mt == untwist(twist(mt)), seed: {}\".format(seed)) # mt[0] is not recoverable\n # if you twist untwisted state, also first part must be twisted correctly\n self.assertListEqual(mt19937.twist(mt),\n mt19937.twist(mt19937.untwist(mt19937.twist(mt))),\n \"test with seed: twist(mt) == twist(untwist(twist(mt))), seed: {}\".format(seed))\n\n def test_untwist(self):\n # untwist state, that was created from seed by one run of twist\n for seed in [42, 4190403025, 1303704821] + [random.getrandbits(32) for _ in range(10)]:\n self.do_test_seed(seed)\n\n # untwist state, that was created from seed by two runs of twist\n for seed in [42, 4190403025, 1303704821] + [random.getrandbits(32) for _ in range(10)]:\n self.do_test_state(mt19937.twist(mt19937.twist(mt19937.init(seed))))\n\n # untwist randomly generated state\n mt = [0] * 624\n for _ in range(10):\n for i in range(624):\n mt[i] = random.getrandbits(32)\n self.do_test_state(mt)\n\n\nclass mt19937_is_init(unittest.TestCase):\n def test_is_init(self):\n testData = [42, 4190403025, 1303704821] + [random.getrandbits(32) for _ in range(10)]\n # 
function init generates initial state\n for seed in testData:\n self.assertTrue(mt19937.is_init(mt19937.init(seed)))\n # function twist generates state, that is not init\n for seed in testData:\n self.assertFalse(mt19937.is_init(mt19937.twist(mt19937.init(seed))))\n # function untwist applied on one time twisted state generates initial state\n for seed in testData:\n self.assertTrue(mt19937.is_init(mt19937.untwist(mt19937.twist(mt19937.init(seed)))))\n # two times applied function twist and untwist is not working because I am loosing information from mt[0]\n for seed in testData:\n mt = mt19937.init(seed)\n mt = mt19937.twist(mt)\n mt = mt19937.twist(mt)\n mt = mt19937.untwist(mt)\n mt = mt19937.untwist(mt)\n # self.assertTrue(mt19937.is_init(mt), \"error for seed: {}\".format(seed))\n\n\nclass mt19937_uninit(unittest.TestCase):\n def test_uninit(self):\n testData = [42, 4190403025, 1303704821] + [random.getrandbits(32) for _ in range(10)]\n for seed in testData:\n # extract seed from initial state\n self.assertEqual(mt19937.uninit(mt19937.init(seed)), seed)\n # perform twist, then untwist and then extract seed\n self.assertEqual(mt19937.uninit(mt19937.untwist(mt19937.twist(mt19937.init(seed)))), seed)\n # if state is not initial None is returned\n self.assertIsNone(mt19937.uninit(mt19937.twist(mt19937.init(seed))))\n\n\nclass mt19937_unextract(unittest.TestCase):\n def test_unextract(self):\n testData = [42, 4190403025, 1303704821] + [random.getrandbits(32) for _ in range(10)]\n for seed in testData:\n for i in range(624):\n mt = mt19937.init(seed)\n rn = mt19937.extract_number(mt, i)\n tm = mt19937.unextract_number(rn, i)\n self.assertEqual(mt[i], tm)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n" }, { "alpha_fraction": 0.5694563984870911, "alphanum_fraction": 0.6005176901817322, "avg_line_length": 24.755556106567383, "blob_id": "5bfef18de3488a1ec813eae6b4f00ca4ca739290", "content_id": "bb8a807d548bdf9b4d888979ddca23e1924ae23c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 57, "num_lines": 45, "path": "/set2/ch11/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nimport random\n\n\ndef append(input):\n lpad = Random.new().read(5+random.randint(0, 5))\n rpad = Random.new().read(5+random.randint(0, 5))\n msg = lpad + input + rpad\n length = 16 - (len(msg) % 16)\n msg += bytes([length])*length\n return msg\n\n\ndef print_debug(msg, data):\n print(msg)\n for i in range(len(data) // 16):\n print(data[i*16:(i+1)*16])\n\n\ndef encryption_oracle(input):\n passwd = Random.new().read(16)\n if random.randint(0, 1):\n iv = Random.new().read(AES.block_size)\n obj = AES.new(passwd, AES.MODE_CBC, iv)\n mode = 'CBC'\n else:\n obj = AES.new(passwd, AES.MODE_ECB)\n mode = 'ECB'\n print(\"chosen mode:\" + mode)\n msg = append(input)\n print_debug(\"message\", msg)\n cip = obj.encrypt(msg)\n print_debug(\"encrypted\", cip)\n return (mode, cip)\n\n\ndef detect(blob):\n return 'ECB' if blob[16:32] == blob[32:48] else 'CBC'\n\nif __name__ == \"__main__\":\n mode, blob = encryption_oracle(bytes(11+16+16))\n print(\"OK\" if mode == detect(blob) else \"wrong\")\n" }, { "alpha_fraction": 0.4747474789619446, "alphanum_fraction": 0.5151515007019043, "avg_line_length": 15.5, "blob_id": "02dba4bd24f16e77cd5ad3d8fa0a83a3da5280d6", "content_id": "b3dfdc65f5757343b5a59b30ae0f87249ee56bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 99, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/set3/ch20/conv.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n for line in $(cat data.b64)\n do\n echo \"$line\" | base64 -d\n echo\n 
done\n" }, { "alpha_fraction": 0.4649122953414917, "alphanum_fraction": 0.4707602262496948, "avg_line_length": 30, "blob_id": "55418a50e72d2c90115ec6d3697c349d5d2f429d", "content_id": "74deb90ba90a2b63b78666a304664065d78642d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 342, "license_type": "no_license", "max_line_length": 70, "num_lines": 11, "path": "/set1/ch8/bash.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor line in $(cat data.hex); do\n dupl=$( echo $line | tr -d \"\\n\" | fold -b16 | sort | uniq -d )\n if [ -n \"$dupl\" ]; then # if string $dupl is not empty\n echo \"Line with duplicite blocks:\"\n echo $line\n echo \"duplicite blocks:\"\n echo $dupl\n fi\ndone\n\n" }, { "alpha_fraction": 0.5673819780349731, "alphanum_fraction": 0.5793991684913635, "avg_line_length": 28.100000381469727, "blob_id": "396ebbad2b40435cf162ac04e04babdb7b898016", "content_id": "3fa8ccdf42e33370de06ae99d5555a763c4b7337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 90, "num_lines": 40, "path": "/set4/ch27/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nimport sys\n\n\nclass PlayGround:\n def __init__(self):\n from Crypto import Random\n self.__passwd = Random.new().read(16)\n self.__iv = self.__passwd\n\n def send_enc_url(self):\n obj = AES.new(self.__passwd, AES.MODE_CBC, self.__iv)\n msg = \"http://www.safeurls.edu/absolutely/innocent/url/\"\n return obj.encrypt(msg)\n\n def recv_validate(self, input):\n obj = AES.new(self.__passwd, AES.MODE_CBC, self.__iv)\n cip = obj.decrypt(input)\n return (cip if not cip.decode('ascii', errors='replace').isprintable() else b\"OK\")\n\n def check(self, k):\n sys.exit(\"Key is correct\" if 
k == self.__passwd else \"Key is incorrect\")\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n data = pg.send_enc_url()\n if pg.recv_validate(data) == b\"OK\":\n print(\"url is OK\")\n\n mod_data = data[:16] + bytes([0]*16) + data[:16]\n err = pg.recv_validate(mod_data)\n key = []\n for i in range(16):\n key += [int(err[i])^int(err[32+i])]\n key = bytes(key)\n print(\"IV/key is:\", key)\n pg.check(key)\n\n" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 31, "blob_id": "e70353013a295fbf50dec0228d6a9bac22465ebd", "content_id": "1ca99f6da69a805224b063c87590a05f4ea8952c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 96, "license_type": "no_license", "max_line_length": 82, "num_lines": 3, "path": "/set1/ch7/bash.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nopenssl enc -d -aes-128-ecb -in data.bin -K $(echo -n \"YELLOW SUBMARINE\" | xxd -p)\n" }, { "alpha_fraction": 0.576301634311676, "alphanum_fraction": 0.6068222522735596, "avg_line_length": 36.13333511352539, "blob_id": "3c01927948fa5427499d09fc827f5349ff6e1a31", "content_id": "f9ce2ad730096e49368e3e5e54dbdc574c9b919c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 121, "num_lines": 15, "path": "/set2/ch10/python.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\n\n\ndef decodeCBC(data, passwd):\n obj = AES.new(passwd, AES.MODE_ECB)\n print(obj.decrypt(data[0:16]).decode(), end=\"\") # decode first block with IV\n for bn in range(1, len(data)//16): # decode next blocks\n print(\"\".join([chr(a ^ b) for (a, b) in zip(obj.decrypt(data[bn*16:(bn+1)*16]), data[(bn-1)*16:bn*16])]), end=\"\")\n\nif __name__ == \"__main__\":\n 
with open(\"data.bin\", \"rb\") as f:\n data = f.read()\n decodeCBC(data, 'YELLOW SUBMARINE')\n" }, { "alpha_fraction": 0.5354330539703369, "alphanum_fraction": 0.5459317564964294, "avg_line_length": 21.41176414489746, "blob_id": "09ead2824e10a66298ffcb26b68b583eaaab35e5", "content_id": "af2b849975db79afed9e58b7ee7927e2e8c51079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 82, "num_lines": 17, "path": "/set1/ch5/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\n\n\ndef encode(text, key):\n ret = \"\"\n pos = 0\n for c in text:\n ret += \"%.2x\" % (c ^ key[pos])\n pos = (pos + 1) % len(key)\n return ret\n\nif __name__ == \"__main__\":\n for line in sys.stdin.readlines():\n print(encode(bytearray(line.strip(), 'ascii'), bytearray('ICE', 'ascii')))\n" }, { "alpha_fraction": 0.5524412393569946, "alphanum_fraction": 0.5660036206245422, "avg_line_length": 26.649999618530273, "blob_id": "07691c3ce6fec7f4bde6dd4b12f60d0ba8b57705", "content_id": "609fa196ccb6e72717ad79c41de63f440cbf7b2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 84, "num_lines": 40, "path": "/set4/ch25/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nimport random\n\n\nclass PlayGround:\n def __init__(self):\n self.__key = bytes([random.getrandbits(8) for _ in range(16)])\n with open(\"25.txt\", \"rb\") as f:\n self.__data = f.read()\n\n def crypt(self, data, key):\n ctr = Counter.new(64, prefix=b'\\x00'*8, initial_value=0, little_endian=True)\n obj = AES.new(key, AES.MODE_CTR, counter=ctr)\n return 
obj.encrypt(data)\n\n def ciphertext(self):\n return self.crypt(self.__data, self.__key)\n\n def edit(self, data, key, offset, newtext):\n if key is None:\n key = self.__key\n\n tmp = bytearray(self.crypt(data, key))\n for i in range(len(newtext)):\n tmp[offset+i] = newtext[i]\n return self.crypt(bytes(tmp), key)\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n\n enc = pg.ciphertext()\n\n for i in range(len(enc)):\n for c in range(256):\n if enc == pg.edit(enc, None, i, [c]):\n print(chr(c), end=\"\")\n" }, { "alpha_fraction": 0.4922839403152466, "alphanum_fraction": 0.5, "avg_line_length": 19.25, "blob_id": "77387f080e4ab0bf97a0f2f809f6e6208ed8b821", "content_id": "0639fe877755f07be11f365a6fba1bef1e0e35fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 55, "num_lines": 32, "path": "/set1/ch3/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\n\n\ndef xors(inp, c):\n ret = bytearray()\n for x in inp:\n ret.append(x ^ c)\n return ret\n\n\ndef score(inp):\n return inp.count(\" \")\n\n\ndef decode(enc):\n best = \"\"\n maxs = 0\n for i in range(255):\n pok = str(xors(enc, i))\n if score(pok) > maxs:\n maxs = score(pok)\n best = pok\n # print(str(score(pok))+\" \"+str(i)+\" \"+pok)\n return best\n\nif __name__ == \"__main__\":\n for line in sys.stdin.readlines():\n enc = bytearray.fromhex(line.strip())\n print(decode(enc))\n" }, { "alpha_fraction": 0.5611457824707031, "alphanum_fraction": 0.5850165486335754, "avg_line_length": 31.035293579101562, "blob_id": "13f0adfcd9da4f3db9d885307918b439973d6fa7", "content_id": "f18d6f016206ed1de92719d1f357b3c26d9c6fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 121, "num_lines": 85, 
"path": "/set1/ch6/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\"\"\"Usage: sollution [-h | --help] [-v] [-f FILE] [-S HISNUM] [-s] [-k]\n\nOptions:\n -h --help Show help screen\n -v Print verbose output\n -f FILE input file [default: data.hex]\n -s Show histogram of 20 bytes\n -S HISNUM Show histogram of HISNUM bytes\n -k Print normalized Hammington distances for different key length <2,40>\n\"\"\"\n\nfrom __future__ import print_function\nimport bitstring\nimport docopt\nimport sys\n\n\ndef hamdist(str1, str2):\n diffs = 0\n assert len(str1) == len(str2), \"length of arguments are different\"\n for i in range(len(str1)):\n a = bitstring.pack('uint:8', str1[i])\n b = bitstring.pack('uint:8', str2[i])\n diffs += (a ^ b).count(True)\n return diffs\n\n\ndef histogram(data, num):\n from collections import Counter\n c = Counter(data)\n for value, frequency in c.most_common(num):\n print(\"0x{:02x}: {}\".format(value, frequency))\n\n\ndef normham(data, keylen):\n global verb\n b1 = data[0*keylen:1*keylen]\n b2 = data[1*keylen:2*keylen]\n b3 = data[2*keylen:3*keylen]\n b4 = data[3*keylen:4*keylen]\n b5 = data[4*keylen:5*keylen]\n b6 = data[5*keylen:6*keylen]\n hd1 = hamdist(b1, b2)\n hd2 = hamdist(b3, b4)\n hd3 = hamdist(b5, b6)\n hd = (hd1 + hd2 + hd3) / 3\n hd = hd1\n ret = hd / keylen\n if verb:\n print(\"Normalized Hammington distance: \"+str(ret)+\" Hammingotn distance: \"+str(hd)+\" Key length: \"+str(keylen))\n return ret\n\nif __name__ == \"__main__\":\n assert hamdist(\"this is a test\".encode('ascii'), \"wokka wokka!!!\".encode('ascii')) == 37, \"error in hamdist function\"\n assert hamdist(b'\\x03\\xfd', b'\\x05\\xfe') == 4, \"error in hamdist function\"\n assert hamdist(bytes(b'\\xff'), bytes(b'\\xfe')) == 1, \"error in hamdist function\"\n arguments = docopt.docopt(__doc__)\n\n # parse arguments\n fn = arguments['-f'] if arguments['-f'] is not None else \"data.hex\"\n verb = 
arguments['-v']\n if verb:\n print(\"Arguments:\\n\"+str(arguments), file=sys.stderr)\n print(\"\\nOpening file: \"+fn, file=sys.stderr)\n\n # open file\n with open(fn, \"rt\") as f:\n lines = f.readlines()\n data = bytearray.fromhex(lines[0].strip())\n\n # show histogram of bytes\n if arguments['-s'] or (arguments['-S'] is not None):\n hisnum = int(arguments['-S']) if arguments['-S'] is not None else 20\n if verb:\n print(\"Showing histogram for \"+str(hisnum)+\" bytes\", file=sys.stderr)\n histogram(data, hisnum)\n\n # guess key length\n if arguments['-k']:\n for k in range(2, 41):\n n = str(k)+\" \"+str(normham(data, k))\n if not verb:\n print(n)\n" }, { "alpha_fraction": 0.46073299646377563, "alphanum_fraction": 0.46858638525009155, "avg_line_length": 19.105262756347656, "blob_id": "39aac9c6b1eae8dae47d6cfd33d246c9ce29a208", "content_id": "e366bdbd499556f6ce4711e19c8ca43b761ef00e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "no_license", "max_line_length": 55, "num_lines": 38, "path": "/set1/ch4/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\n\n\ndef xors(inp, c):\n ret = bytearray()\n for x in inp:\n ret.append(x ^ c)\n return ret\n\n\ndef score(inp):\n return inp.count(\" \")-inp.count(\"x\")\n\n\ndef decode(enc):\n best = \"\"\n maxs = 0\n for i in range(255):\n pok = str(xors(enc, i))\n if score(pok) > maxs:\n maxs = score(pok)\n best = pok\n # print(str(score(pok))+\" \"+str(i)+\" \"+pok)\n return maxs, best\n\nif __name__ == \"__main__\":\n b = \"\"\n ms = 0\n for line in sys.stdin.readlines():\n enc = bytearray.fromhex(line.strip())\n m, dec = decode(enc)\n if m > ms:\n ms = m\n b = dec\n print(b)\n" }, { "alpha_fraction": 0.49050474166870117, "alphanum_fraction": 0.5809594988822937, "avg_line_length": 28.211679458618164, "blob_id": 
"5785e81bafea604522b9d4ed2a31e3f7c2a6f6f6", "content_id": "373ed9dcb23ad209e84680ebc97019d1fcbedd8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4002, "license_type": "no_license", "max_line_length": 403, "num_lines": 137, "path": "/set5/ch35/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport random\nimport hashlib\nfrom Crypto.Cipher import AES\n\np_nist = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\n\n\nclass Alice:\n def __init__(self):\n self.p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\n self.g = 2\n self.a = random.randint(0, self.p)\n self.A = pow(self.g, self.a, self.p)\n self.msg = random.randint(33, 100).to_bytes(16, byteorder='big')\n\n def send_param(self):\n return self.p, self.g, self.A\n\n def receive_key(self, B):\n self.B = B\n self.s = pow(self.B, self.a, self.p)\n self.key = hashlib.sha1(str(self.s).encode('utf8')).hexdigest()[0:16].encode('utf8')\n\n def send_msg(self):\n iv = random.getrandbits(16*8).to_bytes(16, byteorder='big')\n obj = AES.new(self.key, AES.MODE_CBC, iv)\n return iv + obj.encrypt(self.msg)\n\n def check_response(self, m):\n # decrypt message\n iv = m[0:16]\n obj = AES.new(self.key, 
AES.MODE_CBC, iv)\n msg = obj.decrypt(m[16:])\n num = int.from_bytes(msg, byteorder='big')\n # check if number in message in incrementd self.msg\n num -= 1\n if self.msg == num.to_bytes(16, byteorder='big'):\n print(\"Check OK\")\n return True\n else:\n print(\"Check error\")\n print(\"got: \", msg)\n print(\"shoud be:\", num.to_bytes(16, byteorder='big'))\n return False\n\n\nclass Bob:\n def receive_param(self, p, g, A):\n self.p = p\n self.g = g\n self.A = A\n self.b = random.randint(0, self.p)\n self.B = pow(self.g, self.b, self.p)\n self.s = pow(self.A, self.b, self.p)\n self.key = hashlib.sha1(str(self.s).encode('utf8')).hexdigest()[0:16].encode('utf8')\n\n def send_key(self):\n return self.B\n\n def send_response(self, m):\n # decrypt message\n iv = m[0:16]\n obj = AES.new(self.key, AES.MODE_CBC, iv)\n msg = obj.decrypt(m[16:])\n num = int.from_bytes(msg, byteorder='big')\n # increment number from message\n num += 1\n # encrypt message\n obj2 = AES.new(self.key, AES.MODE_CBC, iv)\n return iv + obj2.encrypt(num.to_bytes(16, byteorder='big'))\n\n\ndef decode_msg(p, g, msg):\n def decrypt(s, msg):\n iv = msg[0:16]\n key = hashlib.sha1(str(s).encode('utf8')).hexdigest()[0:16].encode('utf8')\n obj = AES.new(key, AES.MODE_CBC, iv)\n m = obj.decrypt(msg[16:])\n return m\n\n if g == 1:\n m = decrypt(1, msg)\n elif g == p:\n m = decrypt(0, msg)\n elif g == p-1:\n m1 = decrypt(1, msg)\n m2 = decrypt(p-1, msg)\n m = m1 if m1[0] == 0 else m2\n\n print(\"decoded message:\", m)\n\n\ndef mitm(g):\n m = True # MITM\n if g == 1:\n print(\"MITM for g:1\")\n elif g == p_nist:\n print(\"MITM for g:p\")\n elif g == p_nist-1:\n print(\"MITM for g:p-1\")\n elif g == 0:\n print(\"normal run\")\n m = False\n\n alice = Alice()\n if m:\n alice.g = g # MITM\n alice.A = g # MITM\n bob = Bob()\n p, g, A = alice.send_param()\n bob.receive_param(p, g, A)\n B = bob.send_key()\n alice.receive_key(B)\n m1 = alice.send_msg()\n if m:\n decode_msg(p, g, m1) # decode msg\n m2 = 
bob.send_response(m1)\n if m:\n decode_msg(p, g, m2) # decode msg\n alice.check_response(m2)\n del alice\n del bob\n\n\ndef main():\n # normal protocol for echo bot\n mitm(0)\n\n # MITM\n mitm(1)\n mitm(p_nist)\n mitm(p_nist-1)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.59375, "avg_line_length": 15, "blob_id": "1b12ca97ee3497573842f7847812a7c509970344", "content_id": "1e1a343fae718ad5b0f647ac0dfe3c4c412ca0a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 32, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/set1/ch7/Makefile", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "all:\n\tcc -o c.exe c.c -lcrypto\n" }, { "alpha_fraction": 0.46952009201049805, "alphanum_fraction": 0.5635538101196289, "avg_line_length": 31.125, "blob_id": "93b92abeda8bc225abf64ea9a5f00da9f496bd75", "content_id": "2da5c51b15b89aec4354a94deb88ae6460e47981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 59, "num_lines": 48, "path": "/set2/ch13/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\n\n\nclass PlayGround:\n def __init__(self):\n from Crypto import Random\n self.__passwd = Random.new().read(16)\n\n def __enc(self, text):\n length = 16 - (len(text) % 16)\n msg = text.encode(\"utf-8\") + bytes([length])*length\n obj = AES.new(self.__passwd, AES.MODE_ECB)\n cip = obj.encrypt(msg)\n return cip\n\n def __dec(self, text):\n obj = AES.new(self.__passwd, AES.MODE_ECB)\n cip = obj.decrypt(text)\n if cip[-1] < 32: # remove padding\n cip = cip[:-cip[-1]]\n return cip.decode(\"utf-8\")\n\n def profile_for(self, mail):\n mail = mail.translate(str.maketrans(\"\", \"\", \"&=\"))\n msg = 
\"email=\"+mail+\"&uid=10&role=user\"\n return self.__enc(msg)\n\n def parse(self, cookie):\n msg = self.__dec(cookie)\n obj = msg.split(\"&\")\n return obj\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n # 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF\n # email=0123456789 ABC&uid=10&role= user[\\x12]*12\n # admin[\\x11]*11\n # email=0123456789 admin[\\x11]*11 &uid=10&role=..\n # sollution:\n # s1=\"0123456789ABC\" (use first and second block)\n # s2=\"0123456789admin[\\x12]*11\" (append second block)\n s1 = b'0123456789ABC'\n s2 = b'0123456789admin'+b\"\\x0b\"*11\n d1 = pg.profile_for(s1.decode('ascii'))[0:32]\n d2 = pg.profile_for(s2.decode('ascii'))[16:32]\n print(pg.parse(d1+d2))\n" }, { "alpha_fraction": 0.5223515033721924, "alphanum_fraction": 0.5529699921607971, "avg_line_length": 27.15517234802246, "blob_id": "90bb8533bef2e9151dfe218a06b93b08b9e911d6", "content_id": "63ecaf19bc51f2f6b580870e5af7cd3a271971a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1633, "license_type": "no_license", "max_line_length": 106, "num_lines": 58, "path": "/set3/ch19/solver.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport binascii\nimport sys\nfrom colorama import Fore, Style\n\n\ndef show(blok, keys):\n for i in range(0, len(blok)):\n for j, znak in enumerate(blok[i]):\n if j < len(keys):\n print(\"{}\".format(chr(znak ^ keys[j])), end=\"\")\n else:\n print(Fore.RED + \"{:3} \".format(znak) + Style.RESET_ALL, end=\"\")\n print()\n\n\ndef score_bytes(data):\n ret = 0\n for c in data:\n ret += 1 if ord('a') <= c <= ord('z') else 0\n ret += 1 if ord('A') <= c <= ord('Z') else 0\n ret += 1 if c == 32 else 0\n return ret\n\n\ndef score_block(blok):\n ret = {}\n for k in range(0, 256):\n dec = [(k ^ c) for c in blok]\n ret[k] = score_bytes(dec)\n return ret\n\n\ndef score_column(rows, i):\n blok = [r[i] for r in 
rows if i < len(r)]\n score = score_block(blok)\n from collections import Counter\n c = Counter(score)\n print(c.most_common(8))\n return c.most_common(1)[0][0]\n\nif __name__ == \"__main__\":\n fn = sys.argv[1] if len(sys.argv) > 1 else \"encoded_data.hex\"\n with open(fn, \"rt\") as f:\n lines = f.readlines()\n\n rows = [None] * len(lines)\n data = []\n for i, line in enumerate(lines):\n rows[i] = binascii.unhexlify(line.rstrip().encode('utf-8'))\n\n keys = []\n for p in range(0, 30):\n keys += [score_column(rows, p)]\n keys += [174, 187, 249] # added by hand because too little charctersto detct automatically\n keys += [145, 144, 141, 183, 111] # used google to find poem - it is not possible to guess characters\n show(rows, keys)\n" }, { "alpha_fraction": 0.5557553768157959, "alphanum_fraction": 0.5611510872840881, "avg_line_length": 25.4761905670166, "blob_id": "da7f07878d4e04c92b7d4330cec58a3215845812", "content_id": "87c1b51a36c2eed56491fcff0d5eae82fdb8111f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "no_license", "max_line_length": 97, "num_lines": 21, "path": "/set1/ch2/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\n\n\ndef xorstrings(txt, hes):\n if len(txt) != len(hes):\n print(\"length of text:\"+str(len(txt))+\"\\nlength of pass:\"+str(len(hes)), file=sys.stderr)\n return \"\"\n else:\n ret = \"\"\n for i in range(len(txt)):\n ret += \"%x\" % (txt[i] ^ hes[i])\n return ret\n\nif __name__ == \"__main__\":\n lines = sys.stdin.readlines()\n txt = bytearray.fromhex(lines[0].strip())\n hes = bytearray.fromhex(lines[1].strip())\n print(xorstrings(txt, hes))\n" }, { "alpha_fraction": 0.46947214007377625, "alphanum_fraction": 0.4789589047431946, "avg_line_length": 30.623077392578125, "blob_id": "4f7664ec0815f05e76e51570a2e1435443bc6220", "content_id": 
"b6ebf7205f6d26da155639c33a04c511dfba0a0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4111, "license_type": "no_license", "max_line_length": 114, "num_lines": 130, "path": "/set1/ch6/crypto_xor_find_more_bytes_key.c", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#include <stdlib.h> //exit\n#include <ctype.h> //isalnum\n#include <stdbool.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <sys/mman.h>\n\nstruct stat sb;\nuint8_t *p;\nbool debug = true;\n\nvoid read_into_memory(char *fname) {\n int fd = open (fname, O_RDONLY);\n if (fd == -1) {\n perror (\"open\");\n exit(EXIT_FAILURE);\n }\n if (fstat (fd, &sb) == -1) {\n perror (\"fstat\");\n exit(EXIT_FAILURE);\n }\n if (!S_ISREG (sb.st_mode)) {\n fprintf (stderr, \"%s is not a file\\n\", fname);\n exit(EXIT_FAILURE);\n }\n p = mmap (0, sb.st_size, PROT_READ, MAP_SHARED, fd, 0);\n if (p == MAP_FAILED) {\n perror (\"mmap\");\n exit(EXIT_FAILURE);\n }\n if (close (fd) == -1) {\n perror (\"close\");\n exit(EXIT_FAILURE);\n }\n}\n\nvoid free_memory() {\n if (munmap (p, sb.st_size) == -1) {\n perror (\"munmap\");\n exit(EXIT_FAILURE);\n }\n}\n\nint score_char(uint8_t znak) {\n if (znak == ' ') return 1;\n else if (isalnum(znak)) return 1;\n else return 0;\n}\n\nint score_key_keylen(uint8_t key, int key_len, int offset){\n int score = 0;\n for (off_t len = offset; len < sb.st_size; len+=key_len) //score the buffer for given key and key_len\n score += score_char(p[len] ^ key);\n return score;\n}\n\nint score_keylen(int key_len){\n //return score for best key of given key_len\n uint8_t key = 0;\n int score, best_score = 0;\n for (int i = 0; i < 256; i++, key++){\n score = score_key_keylen(key, key_len, 0);\n if (score > best_score)\n best_score = score;\n }\n return best_score;\n}\n\nint find_key_len(int max_key_len) {\n int best_key_len, score, 
best_score;\n float norm_score, best_norm_score = 0;\n if (debug) printf(\"length of buffer: %i\\n\", (int)sb.st_size);\n\n for (int key_len = 1; key_len <= max_key_len; key_len++) {\n score = score_keylen(key_len);\n norm_score = score / ((float)sb.st_size/key_len);\n if (norm_score > best_norm_score) {\n best_norm_score = norm_score;\n best_key_len = key_len;\n best_score = score;\n }\n }\n if (debug) printf(\"Score for keylength %i: %i\\n\", best_key_len ,best_score);\n if (debug) printf(\"Normalized score for keylength %i: %f\\n\", best_key_len,best_norm_score);\n if (debug) printf(\"Size of block for keylength %i: %f\\n\", best_key_len, (float)sb.st_size / best_key_len);\n return best_key_len;\n}\n\nvoid print_xored_data(uint8_t *keys, int key_len){\n for (off_t len = 0; len < sb.st_size; len++)\n putchar(p[len] ^ keys[len%key_len]);\n putchar('\\n');\n}\n\nint main (int argc, char *argv[]){\n if (argc < 2) {\n fprintf (stderr, \"usage: %s <file>\\n\", argv[0]);\n exit(EXIT_FAILURE);\n }\n\n read_into_memory(argv[1]); //fill global pointer (array) p\n int key_len = find_key_len(82);\n printf(\"Guessed key length: %i\\n\", key_len);\n\n uint8_t key = 0, best_key;\n int score, best_score;\n uint8_t keys[key_len];\n\n for (int offset = 0; offset < key_len; offset++){\n best_score = 0;\n for (int i = 0; i < 256; i++, key++){\n score = score_key_keylen(key, key_len, offset);\n if (score > best_score) {\n best_score = score;\n best_key = key;\n }\n }\n keys[offset] = best_key;\n }\n\n printf(\"Decoding file with key: %*.*s\\n\", key_len, key_len, keys);\n print_xored_data(keys, key_len);\n\n free_memory();\n exit(EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.679425835609436, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 22.22222137451172, "blob_id": "14aa52454f515652c74245e59798c46b7c5be1f0", "content_id": "2ef41a0f214a93003c6609f374fbd3ca2d4942f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 209, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/set1/ch7/python.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom Crypto.Cipher import AES\n\nobj = AES.new('YELLOW SUBMARINE', AES.MODE_ECB)\n\nwith open(\"data.bin\", \"rb\") as f:\n data = f.read()\nmessage = obj.decrypt(data)\nprint(message.decode())\n" }, { "alpha_fraction": 0.5201793909072876, "alphanum_fraction": 0.5784753561019897, "avg_line_length": 32.787879943847656, "blob_id": "570e3f05a2eccad97da38d97117772c3f0900387", "content_id": "fe8e467de917971ebf17491be04c60710c47510a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 124, "num_lines": 33, "path": "/set4/ch29/test.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport sha1\nimport hashlib\nimport unittest\n\n\nclass shatest(unittest.TestCase):\n def test_sha(self):\n testData = [b\"test str\", b\"string longer than 512 bits\" + b\"A\" * 512, b\"very long\" + b\"X\" * 3000, bytes(range(256))]\n for msg in testData:\n hash1 = sha1.sha1(msg)\n mac = hashlib.sha1(msg)\n hash2 = mac.hexdigest()\n self.assertEqual(hash1, hash2, \"[ERROR] for msg: {}\".format(msg))\n\n\nclass sha2state(unittest.TestCase):\n def test_sha2state(self):\n testData = [b\"test str\", b\"string longer than 512 bits\" + b\"A\" * 512, b\"very long\" + b\"X\" * 3000, bytes(range(256))]\n for msg in testData:\n hash1 = sha1.sha1(msg)\n h0, h1, h2, h3, h4 = sha1.process_all(msg)\n a0, a1, a2, a3, a4 = sha1.sha2state(hash1)\n self.assertEqual(a0, h0)\n self.assertEqual(a1, h1)\n self.assertEqual(a2, h2)\n self.assertEqual(a3, h3)\n self.assertEqual(a4, h4)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n" }, { "alpha_fraction": 0.5645161271095276, 
"alphanum_fraction": 0.5924317836761475, "avg_line_length": 27.785715103149414, "blob_id": "57bf4e4cad479646284112262cbe7805e76c1fab", "content_id": "63ef9785be55b4f9ebaa891406eaa65da16754d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1612, "license_type": "no_license", "max_line_length": 103, "num_lines": 56, "path": "/set4/ch26/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nimport random\n\n\nclass PlayGround:\n def __init__(self):\n self.__key = bytes([random.getrandbits(8) for _ in range(16)])\n\n def __append(self, input):\n input = input.translate(str.maketrans(\"\", \"\", \";=\"))\n msg = \"comment1=cooking%20MCs;userdata=\" + input + \";comment2=%20like%20a%20pound%20of%20bacon\"\n msg = msg.encode('ascii')\n return msg\n\n def enc(self, input):\n ctr = Counter.new(64, prefix=b'\\x00'*8, initial_value=0, little_endian=True)\n obj = AES.new(self.__key, AES.MODE_CTR, counter=ctr)\n msg = self.__append(input)\n cip = obj.encrypt(msg)\n return cip\n\n def dec(self, input):\n ctr = Counter.new(64, prefix=b'\\x00'*8, initial_value=0, little_endian=True)\n obj = AES.new(self.__key, AES.MODE_CTR, counter=ctr)\n cip = obj.encrypt(input)\n return cip\n\n def check(self, input):\n data = self.dec(input)\n data = data.decode('ascii', errors='ignore')\n print((\"Check: OK\" if \";admin=true;\" in data else \"Check: Failed\"))\n\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n data = pg.enc('X:admin<true:')\n\n dec = pg.dec(data)\n print(dec)\n\n # bit flipping attack start\n data = bytearray(data)\n data[33] = data[33] ^ 1\n data[39] = data[39] ^ 1\n data[44] = data[44] ^ 1\n data = bytes(data)\n # bit flipping attack end\n\n dec = pg.dec(data)\n print(dec)\n\n print(\"Checking ';admin=true;' substring in string\")\n pg.check(data)\n" }, 
{ "alpha_fraction": 0.6560846567153931, "alphanum_fraction": 0.6931216716766357, "avg_line_length": 36.79999923706055, "blob_id": "0324628e69dba2a13068a7cfc2e97cacee06613f", "content_id": "fbef8408bfbf11145bfe9b5fcdb0f3fbfa46e147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 189, "license_type": "no_license", "max_line_length": 83, "num_lines": 5, "path": "/set1/ch6/conv.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "# binary file to text file in hex form\nxxd -u -p data.bin | tr -d \"\\n\"\n\n# binary file display 10 bytes in binary representation, each line contains 5 bytes\nxxd -l 10 -g 1 -c 5 -b data.bin\n" }, { "alpha_fraction": 0.5541324019432068, "alphanum_fraction": 0.5679072141647339, "avg_line_length": 32.104000091552734, "blob_id": "80a2e02c60ff0cbab62c2efcc9cf20c487d813f9", "content_id": "ceb52676a5b5fe8044086e8d5ce0c81ce645c120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4138, "license_type": "no_license", "max_line_length": 116, "num_lines": 125, "path": "/set2/ch12/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport sys # argv, stderr\n\n\ndef print_debug_block(msg, data):\n print(msg)\n for i in range(len(data) // 16):\n print(data[i*16:(i+1)*16])\n\n\nclass PlayGround:\n def __init__(self, fn):\n import base64\n from Crypto import Random\n self.__passwd = Random.new().read(16)\n with open(fn, \"rb\") as f:\n lines = f.read()\n self.__secret = base64.decodebytes(lines)\n # self.__secret = b\"yellow submarine secret text\"\n\n def append(self, input):\n msg = input + self.__secret\n length = 16 - (len(msg) % 16)\n msg += bytes([length])*length\n return msg\n\n def encryption_oracle(self, input):\n from Crypto.Cipher import AES\n obj = AES.new(self.__passwd, AES.MODE_ECB)\n msg = 
self.append(input)\n cip = obj.encrypt(msg)\n return cip\n\n\nclass EncData:\n def __init__(self, pg):\n self.__pg = pg # instance of PlayGround\n self.bs = self.__give_enc_block_size() # Size of block - for AES 128 is 16 (128/8)\n self.num_block = len(self.__pg.encryption_oracle(b\"\")) // self.bs\n\n def __give_enc_block_size(self):\n i = 0\n while True:\n diff = len(self.__pg.encryption_oracle(b\"a\"*(i+1))) - len(self.__pg.encryption_oracle(b\"a\"*i))\n if diff == 0:\n i += 1\n else:\n return diff\n\n def __check_ecb_mode(self):\n bs = self.bs # block size\n enc = self.__pg.encryption_oracle(b\"a\"*2*bs)\n return enc[0:bs] == enc[bs:bs*2]\n\n def print_info(self):\n print(\"Length of encrypted block: \"+str(self.bs), file=sys.stderr)\n print(\"Number of encrypted blocks: \"+str(self.num_block), file=sys.stderr)\n print(\"Checking ECB mode: \"+str(self.__check_ecb_mode()), file=sys.stderr)\n\n\nclass Message:\n def __init__(self, pg, enc_data):\n self.pg = pg\n self.bs = enc_data.bs\n\n def __iblock(self): # return important block from varialbe self.enc\n return self.enc[(self.iblok-1)*self.bs:self.iblok*self.bs]\n\n def encode(self): # encode message and return important block\n return self.__iblock()\n\n def print_debug(self):\n print(self)\n print(\"padding: \", str(self.padding))\n print(\"msg: \", str(self.msg))\n print_debug_block(\"enc: \", self.enc)\n print(\"iblock: \", str(self.iblok))\n print_debug_block(\"important block: \", self.__iblock())\n\n\nclass CompareMessage(Message):\n def __init__(self, pg, enc_data):\n super(). 
__init__(pg, enc_data)\n\n def create(self, k):\n # input: k - last byte of encoded message is k-th byte of secret\n # return: set the variable self.enc\n self.iblok = ((k-1) // 16)+1 # important block that will contain k-th byte of secreet (on last possition)\n self.padding = bytes(b\"a\"*((self.bs-1)-((k-1) % 16)))\n self.msg = self.padding\n self.enc = self.pg.encryption_oracle(self.msg)\n\n\nclass GuessMessage(Message):\n def __init__(self, pg, enc_data):\n super(). __init__(pg, enc_data)\n\n def create(self, k, known, j):\n # input: k - next byte of secret that will be compared to guessed byte j\n # input: known - known part of secret\n # return: set the variable self.enc\n self.iblok = 1\n self.padding = bytes(b\"a\"*(self.bs-k)) # block_size-k paddin of a (aa...aa)\n self.msg = self.padding+bytes(known[max([k-16, 0]):])+bytes([j]) # padding + k-1 known bytes + 1 byte guess\n self.enc = self.pg.encryption_oracle(self.msg)\n\n\nif __name__ == \"__main__\":\n fn = sys.argv[1] if len(sys.argv) > 1 else \"input.b64\"\n pg = PlayGround(fn)\n enc_data = EncData(pg)\n\n known = []\n cmsg = CompareMessage(pg, enc_data)\n gmsg = GuessMessage(pg, enc_data)\n\n for k in range(1, enc_data.bs*enc_data.num_block):\n cmsg.create(k)\n for j in range(256):\n gmsg.create(k, known, j)\n if cmsg.encode() == gmsg.encode():\n known += [j]\n break\n print(\"\".join([chr(k) for k in known]))\n" }, { "alpha_fraction": 0.6101694703102112, "alphanum_fraction": 0.633281946182251, "avg_line_length": 27.217391967773438, "blob_id": "ed4e1f85eda5916d9ce45708e3a6a6877b115013", "content_id": "f042e4427951e26fa8990cdbe2f8421fd577470d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 84, "num_lines": 23, "path": "/set3/ch19/encode.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom 
Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nfrom Crypto import Random\nimport base64\nimport binascii\n\nif __name__ == \"__main__\":\n passwd = Random.new().read(16)\n\n with open(\"data.b64\", \"rt\") as f:\n lines = f.readlines()\n\n for line in lines:\n msg = base64.decodebytes(line.encode('ascii'))\n\n ctr = Counter.new(64, prefix=b'\\x00'*8, initial_value=0, little_endian=True)\n obj = AES.new(passwd, AES.MODE_CTR, counter=ctr)\n enc = obj.encrypt(msg)\n\n hexd = binascii.hexlify(enc)\n print(\"\".join([chr(d) for d in hexd]))\n" }, { "alpha_fraction": 0.4774627685546875, "alphanum_fraction": 0.623903751373291, "avg_line_length": 45.69523620605469, "blob_id": "9855c48ce8db6aa78bea9d3d10487893fe0f44f8", "content_id": "a84c5905fb7464a273cbd4294eb8d92836bd3403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4903, "license_type": "no_license", "max_line_length": 545, "num_lines": 105, "path": "/set6/ch43/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import DSA\nfrom Crypto.Util import number\nfrom Crypto.Random import random\ndebug = True\np = number.bytes_to_long(b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x89\\xe1\\x85\\x52\\x18\\xa0\\xe7\\xda\\xc3\\x81\\x36\\xff\\xaf\\xa7\\x2e\\xda\\x78\\x59\\xf2\\x17\\x1e\\x25\\xe6\\x5e\\xac\\x69\\x8c\\x17\\x02\\x57\\x8b\\x07\\xdc\\x2a\\x10\\x76\\xda\\x24\\x1c\\x76\\xc6\\x2d\\x37\\x4d\\x83\\x89\\xea\\x5a\\xef\\xfd\\x32\\x26\\xa0\\x53\\x0c\\xc5\\x65\\xf3\\xbf\\x6b\\x50\\x92\\x91\\x39\\xeb\\xea\\xc0\\x4f\\x48\\xc3\\xc8\\x4a\\xfb\\x79\\x6d\\x61\\xe5\\xa4\\xf9\\xa8\\xfd\\xa8\\x12\\xab\\x59\\x49\\x42\\x32\\xc7\\xd2\\xb4\\xde\\xb5\\x0a\\xa1\\x8e\\xe9\\xe1\\x32\\xbf\\xa8\\x5a\\xc4\\x37\\x4d\\x7f\\x90\\x91\\xab\\xc3\\xd0\\x15\\xef\\xc8\\x71\\xa5\\x84\\x47\\x1b\\xb1')\nq = 
number.bytes_to_long(b'\\xf4\\xf4\\x7f\\x05\\x79\\x4b\\x25\\x61\\x74\\xbb\\xa6\\xe9\\xb3\\x96\\xa7\\x70\\x7e\\x56\\x3c\\x5b')\ng = number.bytes_to_long(b'\\x59\\x58\\xc9\\xd3\\x89\\x8b\\x22\\x4b\\x12\\x67\\x2c\\x0b\\x98\\xe0\\x6c\\x60\\xdf\\x92\\x3c\\xb8\\xbc\\x99\\x9d\\x11\\x94\\x58\\xfe\\xf5\\x38\\xb8\\xfa\\x40\\x46\\xc8\\xdb\\x53\\x03\\x9d\\xb6\\x20\\xc0\\x94\\xc9\\xfa\\x07\\x7e\\xf3\\x89\\xb5\\x32\\x2a\\x55\\x99\\x46\\xa7\\x19\\x03\\xf9\\x90\\xf1\\xf7\\xe0\\xe0\\x25\\xe2\\xd7\\xf7\\xcf\\x49\\x4a\\xff\\x1a\\x04\\x70\\xf5\\xb6\\x4c\\x36\\xb6\\x25\\xa0\\x97\\xf1\\x65\\x1f\\xe7\\x75\\x32\\x35\\x56\\xfe\\x00\\xb3\\x60\\x8c\\x88\\x78\\x92\\x87\\x84\\x80\\xe9\\x90\\x41\\xbe\\x60\\x1a\\x62\\x16\\x6c\\xa6\\x89\\x4b\\xdd\\x41\\xa7\\x05\\x4e\\xc8\\x9f\\x75\\x6b\\xa9\\xfc\\x95\\x30\\x22\\x91')\n\n\nclass MyDSASigner:\n def __init__(self):\n self.__x = 42\n self.y = pow(g, self.__x, p)\n\n def sign(self, h, priv_key=None, nonce=None):\n x = self.__x if priv_key is None else priv_key\n\n while True:\n k = random.StrongRandom().randint(1, q-1) if nonce is None else nonce\n r = pow(g, k, p) % q\n s2 = number.inverse(k, q) * (number.bytes_to_long(h) + x * r)\n s = pow(k, q-2, q) * (number.bytes_to_long(h) + x * r)\n assert(s2 == s)\n s = s % q\n if r != 0 and s != 0:\n self.k = k\n return (r, s)\n\n def verify(self, sig, h, pub_key=None):\n y = self.y if pub_key is None else pub_key\n r, s = sig\n if not 0 < r < q:\n return False\n if not 0 < s < q:\n return False\n w = number.inverse(s, q)\n u1 = (number.bytes_to_long(h) * w) % q\n u2 = (r * w) % q\n v = ((pow(g, u1, p) * pow(y, u2, p)) % p) % q\n return v == r\n\n\ndef test_MyDSASigner():\n msg = b\"Hello\"\n h = SHA.new(msg).digest()\n\n myDSA = MyDSASigner()\n sig_my = myDSA.sign(h)\n sig_my2 = myDSA.sign(h, 42, myDSA.k)\n assert sig_my == sig_my2\n print('My signature :', sig_my)\n\n key = DSA.construct((myDSA.y, g, p, q, 42))\n sig_py = key.sign(h, myDSA.k)\n print('PyCrypto signature:', sig_py)\n\n assert(sig_my == 
sig_py)\n print(\"Verification my:\", myDSA.verify(sig_my, h))\n print(\"Verification py:\", key.verify(h, sig_py))\n assert myDSA.verify(sig_my, h)\n assert key.verify(h, sig_py)\n\n\ndef find_k(r):\n return 16575\n for k in range(2**16):\n if r == pow(g, k, p) % q:\n return k\n\n\ndef main():\n print('=== Test of my implementation of DSA signer/verifyier ===')\n test_MyDSASigner()\n print('=== Verifying signature ===')\n msg = b\"\"\"For those that envy a MC it can be hazardous to your health\\nSo be friendly, a matter of life and death, just like a etch-a-sketch\\n\"\"\"\n hb = SHA.new(msg).digest()\n hi = int.from_bytes(hb, byteorder='big')\n assert hi == 0xd2d0714f014a9784047eaeccf956520045c45265 # check from website\n y = number.bytes_to_long(b'\\x08\\x4a\\xd4\\x71\\x9d\\x04\\x44\\x95\\x49\\x6a\\x32\\x01\\xc8\\xff\\x48\\x4f\\xeb\\x45\\xb9\\x62\\xe7\\x30\\x2e\\x56\\xa3\\x92\\xae\\xe4\\xab\\xab\\x3e\\x4b\\xde\\xbf\\x29\\x55\\xb4\\x73\\x60\\x12\\xf2\\x1a\\x08\\x08\\x40\\x56\\xb1\\x9b\\xcd\\x7f\\xee\\x56\\x04\\x8e\\x00\\x4e\\x44\\x98\\x4e\\x2f\\x41\\x17\\x88\\xef\\xdc\\x83\\x7a\\x0d\\x2e\\x5a\\xbb\\x7b\\x55\\x50\\x39\\xfd\\x24\\x3a\\xc0\\x1f\\x0f\\xb2\\xed\\x1d\\xec\\x56\\x82\\x80\\xce\\x67\\x8e\\x93\\x18\\x68\\xd2\\x3e\\xb0\\x95\\xfd\\xe9\\xd3\\x77\\x91\\x91\\xb8\\xc0\\x29\\x9d\\x6e\\x07\\xbb\\xb2\\x83\\xe6\\x63\\x34\\x51\\xe5\\x35\\xc4\\x55\\x13\\xb2\\xd3\\x3c\\x99\\xea\\x17')\n r = 548099063082341131477253921760299949438196259240\n s = 857042759984254168557880549501802188789837994940\n myDSA = MyDSASigner()\n print(\"Verification:\", myDSA.verify((r, s), hb, y))\n print('=== Breaking x from k ===')\n k = find_k(r)\n x = (s*k - hi) * number.inverse(r, q)\n x = x % q\n print('x:', x)\n print('=== Verifying x by signer ===')\n sig = myDSA.sign(hb, x, k)\n assert (r, s) == sig\n print('OK')\n print('=== Verifying x by hash ===')\n xh = hex(x)[2:]\n print(\"encoded x:\", xh)\n ch = SHA.new(xh.encode('ascii')).digest()\n print('SHA-1 hash of x:', 
hex(int.from_bytes(ch, byteorder='big'))[2:])\n assert hex(int.from_bytes(ch, byteorder='big'))[2:] == \"954edd5e0afe5542a4adf012611a91912a3ec16\"\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5102909803390503, "alphanum_fraction": 0.5542938113212585, "avg_line_length": 30.311111450195312, "blob_id": "11b0a4d58f4cdb757ec9171bd61d8237ba59593e", "content_id": "0ff548301c59999a62ea3c51e3be9e238498c02b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 129, "num_lines": 45, "path": "/set2/ch15/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\n\n\nclass PKCS7:\n def __init__(self, base):\n self.__base = base\n\n def encode(self, data):\n p_len = self.__base - (len(data) % self.__base)\n if p_len == 0:\n p_len = self.__base\n data += bytes([p_len])*p_len\n return data\n\n def decode(self, data):\n pad = data[-1]\n if not (0 < pad <= self.__base):\n raise ValueError(\"Last byte of padding (\\\\x{0:02x}) is bigger then base ({1})\".format(pad, self.__base))\n for i in range(len(data)-pad, len(data)):\n if data[i] != pad:\n raise ValueError(\"Padding error on position {0} is byte \\\\x{1:02x} should be \\\\x{2:02x}\".format(i, data[i], pad))\n data = data[:-pad]\n return data\n\nif __name__ == \"__main__\":\n pkcs7 = PKCS7(16)\n try:\n print(pkcs7.decode(b'yellow submarine'))\n except ValueError as e:\n print(e)\n print(pkcs7.decode(b'yellow submarine'+b'\\x10'*16))\n print(pkcs7.decode(b'ICE ICE BABY\\x04\\x04\\x04\\x04'))\n try:\n print(pkcs7.decode(b'ICE ICE BABY\\x04\\x04\\x04\\x24'))\n except ValueError as e:\n print(e)\n try:\n print(pkcs7.decode(b'ICE ICE BABY\\x05\\x05\\x05\\x05'))\n except ValueError as e:\n print(e)\n try:\n print(pkcs7.decode(b'ICE ICE BABY\\x01\\x02\\x03\\x04'))\n except ValueError as e:\n print(e)\n" }, { 
"alpha_fraction": 0.5440102815628052, "alphanum_fraction": 0.5740661025047302, "avg_line_length": 31.34722137451172, "blob_id": "7735334ccf8ec94fdf54ef7703fdd483a35f553d", "content_id": "f2d12203dc4c9a53ca1d1f8d20e9151f72343a36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2329, "license_type": "no_license", "max_line_length": 99, "num_lines": 72, "path": "/set4/ch29/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nimport hashlib\nimport sha1\n\nmax_key_len = 1000\n\n\nclass PlayGround:\n def __init__(self):\n import random\n self.__key = bytes([random.randint(0, 255) for _ in range(random.randint(0, max_key_len))])\n # self.__key = b\"\" # also working\n\n def check_msg(self, msg, mac_c, debug=True):\n data = self.__key + msg\n obj = hashlib.sha1(data)\n mac_ok = obj.hexdigest()\n if debug:\n print(\"\\n=== Checking message and MAC ===\")\n print(\"msg:\", msg)\n print(\"provided MAC:\", mac_c)\n print(\"computed MAC:\", mac_ok)\n print((\"MAC OK\" if mac_c == mac_ok else \"MAC ERROR\"))\n return (mac_c == mac_ok)\n\n def sign_msg(self, msg):\n data = self.__key + msg\n obj = hashlib.sha1(data)\n mac_ok = obj.hexdigest()\n return mac_ok\n\n\ndef atack(key_len, msg, mac_msg, msg_new):\n # compute glue padding and convert to bytes\n _, glue_padding = sha1.padding(b\"X\" * key_len + msg)\n gl = sha1.chunks(glue_padding, 8)\n glue_padding = bytes([int(i, 2) for i in gl])\n msg_fake = msg + glue_padding + msg_new\n\n _, msg_new_pad = sha1.padding(b\"X\" * key_len + msg + glue_padding + msg_new)\n mnp = sha1.chunks(msg_new_pad, 8)\n msg_new_pad = bytes([int(i, 2) for i in mnp])\n h0, h1, h2, h3, h4 = sha1.sha2state(mac_msg)\n m, _ = sha1.padding(msg_new + msg_new_pad)\n h0, h1, h2, h3, h4 = sha1.process(m, h0, h1, h2, h3, h4)\n mac_fake = '%08x%08x%08x%08x%08x' % (h0, h1, h2, h3, h4)\n\n return msg_fake, 
mac_fake\n\n\ndef find_key_len(pg):\n for i in range(max_key_len + 1):\n msg_fake, mac_fake = atack(i, b\"msg\", pg.sign_msg(b\"msg\"), b\"msg_new\")\n if pg.check_msg(msg_fake, mac_fake, debug=False):\n print(\"found key length:\", i)\n return i\n exit(\"cannot find key length\")\n\nif __name__ == \"__main__\":\n msg = b\"comment1=cooking%20MCs;userdata=foo;comment2=%20like%20a%20pound%20of%20bacon\"\n pg = PlayGround()\n mac = pg.sign_msg(msg)\n pg.check_msg(msg, mac)\n\n # length extension atack\n print(\"\\n=== length extension atack ===\")\n key_len = find_key_len(pg)\n msg_new = b\";admin=true\"\n msg_fake, mac_fake = atack(key_len, msg, mac, msg_new)\n\n pg.check_msg(msg_fake, mac_fake)\n" }, { "alpha_fraction": 0.3735498785972595, "alphanum_fraction": 0.43619489669799805, "avg_line_length": 25.121212005615234, "blob_id": "5716d16c473cd80f8032da352de1acbd84b29e11", "content_id": "d52758a310fbac8903f5b53bba0d9d979a9a13c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "no_license", "max_line_length": 53, "num_lines": 33, "path": "/set1/ch1/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\n\n\ndef convb64(i):\n if i < 26:\n return chr(ord('A') + i)\n elif i < 52:\n return chr(ord('a') + i-26)\n elif i < 62:\n return chr(ord('0') + i-52)\n elif i == 62:\n return '+'\n elif i == 63:\n return '/'\n\n\nif __name__ == \"__main__\":\n for line in sys.stdin.readlines():\n binarr = bytearray.fromhex(line.strip())\n for i in range(len(binarr) // 3):\n con = binarr[3*i:3*(i+1)]\n # print(con)\n i1 = con[0] >> 2\n i2 = ((con[0] & 3) << 4) + (con[1] >> 4)\n i3 = ((con[1] & 15) << 2) + (con[2] >> 6)\n i4 = con[2] & 63\n print(convb64(i1), end='')\n print(convb64(i2), end='')\n print(convb64(i3), end='')\n print(convb64(i4), end='')\n" }, { "alpha_fraction": 0.5495447516441345, 
"alphanum_fraction": 0.5736475586891174, "avg_line_length": 27.287878036499023, "blob_id": "3f98fbc7d786f54849bd4442981302858a0b8736", "content_id": "40a8e30abe47bf09fbd81a04c75def308c0a5da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1867, "license_type": "no_license", "max_line_length": 103, "num_lines": 66, "path": "/set2/ch16/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom Crypto.Cipher import AES\n\n\ndef print_debug_block(msg, data):\n print(msg)\n for i in range(len(data) // 16):\n print(data[i*16:(i+1)*16])\n\n\nclass PlayGround:\n def __init__(self):\n from Crypto import Random\n self.__passwd = Random.new().read(16)\n self.__iv = Random.new().read(16)\n\n def __append(self, input):\n input = input.translate(str.maketrans(\"\", \"\", \";=\"))\n msg = \"comment1=cooking%20MCs;userdata=\" + input + \";comment2=%20like%20a%20pound%20of%20bacon\"\n msg = msg.encode('ascii')\n length = 16 - (len(msg) % 16)\n msg += bytes([length])*length\n return msg\n\n def enc(self, input):\n obj = AES.new(self.__passwd, AES.MODE_CBC, self.__iv)\n msg = self.__append(input)\n cip = obj.encrypt(msg)\n return cip\n\n def dec(self, input):\n obj = AES.new(self.__passwd, AES.MODE_CBC, self.__iv)\n cip = obj.decrypt(input)\n return cip\n\n def check(self, input):\n data = self.dec(input)\n data = data.decode('ascii', errors='ignore')\n print(data)\n if \";admin=true;\" in data:\n print(\"Check: OK\")\n else:\n print(\"Check: Failed\")\n\nif __name__ == \"__main__\":\n pg = PlayGround()\n data = pg.enc('XXXXXXXXXXXXXXXX:admin<true:XXXX')\n\n dec = pg.dec(data)\n print_debug_block(\"\\nDecoded before change: \", dec)\n\n # bit flipping attack start\n print(\"\\nAlterning encoded block (bitflipping atack)\")\n data = bytearray(data)\n data[32] = data[32] ^ 1\n data[38] = data[38] ^ 1\n data[43] = data[43] ^ 1\n 
data = bytes(data)\n # bit flipping attack end\n\n dec = pg.dec(data)\n print_debug_block(\"\\nDecoded after change: \", dec)\n\n print(\"\\nChecking ';admin=true;' substring in string\")\n pg.check(data)\n" }, { "alpha_fraction": 0.5734265446662903, "alphanum_fraction": 0.6013985872268677, "avg_line_length": 14.777777671813965, "blob_id": "e9615f17b21a05c3e22c90117496deac3fab43c4", "content_id": "235432c2350ffb0c527a6b3f8661c2b960fb8d01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 143, "license_type": "no_license", "max_line_length": 38, "num_lines": 9, "path": "/set3/ch17/test.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#for i in {1..100}\n#do\n# ./sollution.py >> output.txt\n#done\n\ncat output.txt | sort | uniq > tmp.txt\nmv tmp.txt output.txt\n\n" }, { "alpha_fraction": 0.45507487654685974, "alphanum_fraction": 0.47420963644981384, "avg_line_length": 22.80198097229004, "blob_id": "3136e57e4634b388bd9c985632b3599b664a5a98", "content_id": "17a931c819e75f883abd617d2f6f8f88e88f9698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2404, "license_type": "no_license", "max_line_length": 70, "num_lines": 101, "path": "/set5/ch40/sollution_e5.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\ndebug = False\n\n\ndef egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n\ndef invmod(a, b):\n from Crypto.Util import number\n return number.inverse(a, b)\n # https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n gcd, x, y = egcd(a, b)\n if gcd == 1:\n return (x % b)\n else:\n if debug:\n print('invmod', a, b)\n# invmod(17, 3120) is 2753\n# assert(invmod(17, 3120) == 2753)\n\n\nclass RSA:\n def generate_keys(self, e):\n len = 1024\n # len = 16 # 
DEBUG\n from Crypto.Util import number\n while True:\n p = number.getPrime(len)\n q = number.getPrime(len)\n n = p * q\n et = (p-1)*(q-1)\n d = invmod(e, et)\n if d is not None:\n break\n print(\"Finding private/public key\")\n if debug:\n print('n:', n, 'p:', p, 'q:', q, 'd:', d, 'e:', e)\n private_key = (d, n)\n public_key = (e, n)\n return (private_key, public_key)\n\n def encrypt(self, msg, pub_key):\n A, n = pub_key\n if type(msg) == int:\n m = msg\n else:\n m = int.from_bytes(msg, byteorder='big')\n return pow(m, A, n)\n\n def decrypt(self, msg, priv_key):\n a, n = priv_key\n msg = pow(msg, a, n)\n m = msg.to_bytes((msg.bit_length() // 8) + 1, byteorder='big')\n return m\n\n\ndef crack(cip, key):\n assert(len(key) == len(cip))\n e = key[0][0]\n assert(e == len(key))\n # http://www.di-mgt.com.au/crt.html#crackingrsa\n # TODO check if n (key[1]) are coprimes\n N = 1\n for k in key:\n N *= k[1]\n X = 0\n for i, k in enumerate(key):\n n = k[1]\n X += cip[i] * (N // n) * invmod((N // n), n)\n X = X % N\n dec = X ** (1. 
/ e)\n if round(dec)**e == X:\n return round(dec)\n else:\n return dec\n\n\ndef main():\n msg = 42\n e = 7 # public key\n print(\"message:\", msg)\n rsa = RSA()\n key = []\n cip = []\n for i in range(e):\n _, k = rsa.generate_keys(e)\n key += [k]\n cip += [rsa.encrypt(msg, k)] # encrypted cipher text\n dec = crack(cip, key)\n print(\"message after encoding / decoding:\", dec)\n assert(msg == dec)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5824176073074341, "alphanum_fraction": 0.6373626589775085, "avg_line_length": 35.400001525878906, "blob_id": "80ecc4e9caf91e3375805090b86415e7526cd694", "content_id": "86dd3bdef187324b8f685119787c322348e7aaaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 182, "license_type": "no_license", "max_line_length": 66, "num_lines": 5, "path": "/set1/ch6/data/download_input.sh", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwget http://cryptopals.com/static/challenge-data/6.txt -O data.b64\ncat data.b64 | base64 -d > data.bin\ncat data.bin | hexdump -v -e '/1 \"%02X \"' | tr -d \" \" > data.hex\n" }, { "alpha_fraction": 0.466761976480484, "alphanum_fraction": 0.48570406436920166, "avg_line_length": 23.98214340209961, "blob_id": "f259e17c22cafb24a2a894c953194084f7b14452", "content_id": "0aa8b54c236be203b2178a5e31c4b806965f1a74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2798, "license_type": "no_license", "max_line_length": 70, "num_lines": 112, "path": "/set6/ch46/sollution.py", "repo_name": "PKlimo/CryptoChallenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom __future__ import print_function\ndebug = True\n\n\ndef egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n\ndef invmod(a, b):\n # https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n gcd, x, y = 
egcd(a, b)\n if gcd == 1:\n return (x % b)\n# invmod(17, 3120) is 2753\nassert(invmod(17, 3120) == 2753)\n\n\nclass RSA:\n def generate_keys(self, len):\n from Crypto.Util import number\n print(\"Finding first prime number\")\n p = number.getPrime(len)\n # p = 7\n print(\"Finding second prime number\")\n q = number.getPrime(len)\n # q = 11\n n = p * q\n et = (p-1)*(q-1)\n e = 3\n print(\"Finding private/public key\")\n while True:\n d = invmod(e, et)\n if d is not None:\n break\n else:\n e += 1\n print('e:', e)\n print('d:', d)\n private_key = (d, n)\n public_key = (e, n)\n return (private_key, public_key)\n\n def encrypt(self, msg, pub_key):\n A, n = pub_key\n m = int.from_bytes(msg, byteorder='big')\n if m > n:\n print(\"Message too long\")\n return None\n else:\n return pow(m, A, n)\n\n def decrypt(self, msg, priv_key):\n a, n = priv_key\n msg = pow(msg, a, n)\n m = msg.to_bytes((msg.bit_length() // 8) + 1, byteorder='big')\n return m\n\n\nclass PlayGround:\n def __init__(self):\n msg = b'dGVzdA==' # test\n msg = b'VGhhdCdzIHdoeSBJIGZvdW5kIHlvdSBkb24ndCBwbGF5IGFyb3VuZCB3aXRoIHRoZSBGdW5reSBDb2xkIE1lZGluYQ=='\n import base64\n self.__msg = base64.b64decode(msg)\n self.__rsa = RSA()\n self.__priv_key, self.pub_key = self.__rsa.generate_keys(1024)\n self.enc = self.__rsa.encrypt(self.__msg, self.pub_key)\n\n def oraculum_is_even(self, msg):\n a, n = self.__priv_key\n m = pow(msg, a, n)\n return m % 2 == 0\n\n\ndef kolo(i, pg, min, max):\n e, n = pg.pub_key\n cip = pg.enc * ((2**(e*i)) % n)\n bit = pg.oraculum_is_even(cip)\n if bit:\n max = max - (n // 2**i)\n else:\n min = min + (n // 2**i)\n if debug:\n print('round:', i, 'min:', min, 'max:', max)\n return min, max\n\n\ndef main():\n pg = PlayGround()\n e, n = pg.pub_key\n l = n.bit_length() + 1\n min = 0\n max = n\n for i in range(1, l):\n min, max = kolo(i, pg, min, max)\n print(\"finding plaintext from interval:\", min, max)\n for i in range(min, max):\n if pow(i, e, n) == pg.enc:\n print('Decoded number 
found:', i)\n break\n import math\n dec = i.to_bytes(math.ceil(i.bit_length()/8), byteorder='big')\n print(\"Decoded string:\", dec.decode('ascii'))\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
60
Gimb0/KS1911-SIMS
https://github.com/Gimb0/KS1911-SIMS
74a4a30359142e78becf1ff7fc3cdaf4d2b643a7
b57a5a1ba2b7b779f2779feeb52b0166f7e7a72e
dd0eb38990ffc200c1d18389938ea8ad80728952
refs/heads/master
2022-07-10T04:59:49.365498
2022-07-04T07:51:43
2022-07-04T07:51:43
198,956,106
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7703348994255066, "alphanum_fraction": 0.8086124658584595, "avg_line_length": 51.25, "blob_id": "7adb579425d9540348c1754ff8fb9994d2ba9e26", "content_id": "7d740cc95248b1c9db6a3ffe06c1e0373241bc57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 209, "license_type": "permissive", "max_line_length": 126, "num_lines": 4, "path": "/README.md", "repo_name": "Gimb0/KS1911-SIMS", "src_encoding": "UTF-8", "text": "# KS1911-SIMS\nProfessional Experience Group KS1911 Project - SIMS Data Extractor\n\n[https://en.wikipedia.org/wiki/Secondary_ion_mass_spectrometry](https://en.wikipedia.org/wiki/Secondary_ion_mass_spectrometry)\n" }, { "alpha_fraction": 0.609677255153656, "alphanum_fraction": 0.614501416683197, "avg_line_length": 40.79435348510742, "blob_id": "4d820e3ae66ae5cf91f54bea5f8c7e24dfaf28cb", "content_id": "199fd6cfd4c007272d0434a8b5da8c4766ee8812", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20729, "license_type": "permissive", "max_line_length": 344, "num_lines": 496, "path": "/sims/MainUI.py", "repo_name": "Gimb0/KS1911-SIMS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom PyQt5 import QtWidgets, uic\nfrom io import BytesIO, StringIO, SEEK_SET\nimport pandas as pd;\nfrom datetime import timedelta, time\nfrom db_utils import dbUtils\n\nclass MainUI(QtWidgets.QMainWindow):\n def __init__(self):\n super(MainUI, self).__init__()\n uic.loadUi('MainUI.ui', self)\n \n self.isFileOpen = False\n\n # Add Actions to buttons and menus\n # Open Menu Button\n openFile = self.findChild(QtWidgets.QAction, 'actionOpenFile')\n openFile.triggered.connect(self.openDataFile)\n \n # Exit Menu Button\n exitApp = self.findChild(QtWidgets.QAction, 'actionExit')\n exitApp.triggered.connect(self.closeApp)\n\n # Save Button\n saveDataButton = self.findChild(QtWidgets.QPushButton, 'saveDataButton')\n 
saveDataButton.clicked.connect(self.saveInputData)\n\n # Open Button\n openFileButton = self.findChild(QtWidgets.QPushButton, 'openDFButton')\n openFileButton.clicked.connect(self.openDataFile)\n\n # Filter Button\n filterButton = self.findChild(QtWidgets.QPushButton, 'filterButton')\n filterButton.clicked.connect(self.filterSamples)\n\n # Clear Filter Button\n clearButton = self.findChild(QtWidgets.QPushButton, 'clearButton')\n clearButton.clicked.connect(self.updateExtractInterface)\n\n # Extract Data Button\n extractDataButton = self.findChild(QtWidgets.QPushButton, 'extractDataButton')\n extractDataButton.clicked.connect(self.extractData)\n\n self.getUIComponents()\n\n # Database class\n self.dbConn = dbUtils()\n\n self.initializeInputInterface()\n self.updateExtractInterface()\n\n # Show App Window\n self.show()\n\n def extractSimsData(self, handle):\n databuf = StringIO()\n self.dataPoints = 0\n for _ in range(4):\n next(handle)\n for line in handle:\n if line.startswith('*** DATA END ***') or not line.strip():\n databuf.seek(SEEK_SET)\n return databuf\n databuf.write(line)\n self.dataPoints += 1\n \n def extractSpeciesList(self, handle):\n names = []\n\n for _ in range(3):\n next(handle)\n for line in handle:\n if line.rstrip():\n name = line.split()[0]\n names.append(name)\n else:\n return names\n\n def getUIComponents(self):\n # Input Tab\n # Sample ID\n self.inputSampleID = self.findChild(QtWidgets.QLineEdit, 'sampleIDText')\n # Date of Analysis\n self.analysisDateValue = self.findChild(QtWidgets.QLineEdit, 'sampleDate')\n # Acquisition Time\n self.acqTimeTime = self.findChild(QtWidgets.QLineEdit, 'sampleAcqTime')\n # Sample Species\n self.speciesListText = self.findChild(QtWidgets.QLineEdit, 'speciesListText')\n # Primary Ions\n self.pIonsText = self.findChild(QtWidgets.QLineEdit, 'samplePriIONText')\n # Primary Ions Energy\n self.pIonsEnergyValue = self.findChild(QtWidgets.QLineEdit, 'priIonEValue')\n # Number of data points\n self.dataPointsText = 
self.findChild(QtWidgets.QLineEdit, 'sampleDataPoints')\n # Annealing Temperature\n self.annTemp = self.findChild(QtWidgets.QDoubleSpinBox, 'inputAnnTemp')\n # Annealing Time\n self.annTime = self.findChild(QtWidgets.QDoubleSpinBox, 'inputAnnTime')\n # Gas Composition\n self.gasComp = self.findChild(QtWidgets.QComboBox, 'inputGasComposition')\n # Cooling Method\n self.coolingMethod = self.findChild(QtWidgets.QComboBox, 'inputCoolingMethod')\n # Matrix Composition\n self.matrixComp = self.findChild(QtWidgets.QComboBox, 'matrixCompComboBox')\n # Sputtering Rate\n self.sputtRate = self.findChild(QtWidgets.QDoubleSpinBox, 'inputSputtRate')\n # Additional Notes\n self.addNotes = self.findChild(QtWidgets.QTextEdit, 'addNotesText')\n\n # Display Tab\n # Sample ID\n self.displaySampleID = self.findChild(QtWidgets.QLineEdit, 'displaySampleID')\n # Annealing Temperature\n self.displayAnnTemp = self.findChild(QtWidgets.QLineEdit, 'displayAnnTemp')\n # Annealing Time\n self.displayAnnTime = self.findChild(QtWidgets.QLineEdit, 'displayAnnTime')\n # Analysis Date\n self.displayAnalysisDate = self.findChild(QtWidgets.QLineEdit, 'displayAnalDate')\n # Total Acquisition Time\n self.displayTotAcqTime = self.findChild(QtWidgets.QLineEdit, 'displayAcqTime')\n # Data Points\n self.displayDataPoints = self.findChild(QtWidgets.QLineEdit, 'displayDataPoints')\n # Species List\n self.displaySpecies = self.findChild(QtWidgets.QLineEdit, 'displaySpeciesList')\n # Primary Ion\n self.displayPriIon = self.findChild(QtWidgets.QLineEdit, 'displayPriIon')\n # Primary Ion Energy\n self.displayPriIonEnergy = self.findChild(QtWidgets.QLineEdit, 'displayPriIonE')\n # Gas Composition\n self.displayGasComp = self.findChild(QtWidgets.QLineEdit, 'displayGasComp')\n # Cooling Method\n self.displayCoolingMethod = self.findChild(QtWidgets.QLineEdit, 'displayCoolMethod')\n # Matrix Composition\n self.displayMatrixComp = self.findChild(QtWidgets.QLineEdit, 'displayMatrixComp')\n # Sputtering Rate\n 
self.displaySputtRate = self.findChild(QtWidgets.QLineEdit, 'displaySputtRate')\n # Additional Notes\n self.displayAddNotes = self.findChild(QtWidgets.QTextEdit, 'displayAddNotes')\n\n # Extract Tab\n # Samples List\n self.samplesList = self.findChild(QtWidgets.QListWidget, 'samplesList')\n # Filter Species List\n self.filterSpeciesList = self.findChild(QtWidgets.QListWidget, 'filterSpeciesList')\n # Filter Annealing Temperature List\n self.filterAnnTempsList = self.findChild(QtWidgets.QListWidget, 'filterAnnTempList')\n # Filter Cooling Method List\n self.filterCoolingMethodList = self.findChild(QtWidgets.QListWidget, 'filterCoolingList')\n # Filter Gas Composition List\n self.filterGasCompList = self.findChild(QtWidgets.QListWidget, 'filterGasCompList')\n # Filter Matrix Composition List\n self.filterMatrixCompList = self.findChild(QtWidgets.QListWidget, 'filterMatrixCompList')\n # Processing Normalization Species List\n self.normList = self.findChild(QtWidgets.QListWidget, 'outNormList')\n # Output Species List\n self.specieList = self.findChild(QtWidgets.QListWidget, 'outSpeciesList')\n\n def initializeInputInterface(self):\n # Fill lists with data from database\n # Gas Comp\n for gas in self.dbConn.getGasComposition():\n self.gasComp.addItem(gas[0])\n # Matrix Comp\n for matrix in self.dbConn.getMatrixComposition():\n self.matrixComp.addItem(matrix[0])\n # Cooling Method\n for method in self.dbConn.getCoolingMethod():\n self.coolingMethod.addItem(method[0])\n\n def updateExtractInterface(self):\n # Samples List\n self.samplesList.clicked.connect(self.sampleSelected)\n self.samplesList.clear()\n for sample in self.dbConn.getSamples():\n self.samplesList.addItem(sample[0])\n\n # Filter Species List\n self.filterSpeciesList.clear()\n for specie in self.dbConn.getSpecies():\n if specie[0] == \"\":\n pass\n self.filterSpeciesList.addItem(specie[0])\n\n # Filter Annealing Temps List\n self.filterAnnTempsList.clear()\n for temp in self.dbConn.getAnnealingTemps():\n 
if temp[0] == \"\":\n pass\n self.filterAnnTempsList.addItem(str(temp[0]))\n\n # Filter Cooling Method List\n self.filterCoolingMethodList.clear()\n for method in self.dbConn.getCoolingMethod():\n if method[0] == \"\":\n pass\n self.filterCoolingMethodList.addItem(method[0])\n \n # Filter Gas Composition List\n self.filterGasCompList.clear()\n for gas in self.dbConn.getGasComposition():\n if gas[0] == \"\":\n pass\n self.filterGasCompList.addItem(gas[0])\n\n # Filter Matrix Composition List\n self.filterMatrixCompList.clear()\n for matrix in self.dbConn.getMatrixComposition():\n if matrix[0] == \"\":\n pass\n self.filterMatrixCompList.addItem(matrix[0])\n\n def updateInputInterface(self):\n try:\n self.sampleIDText.setText(self.sampleID)\n self.analysisDateValue.setText(self.analysisDate)\n self.acqTimeTime.setText(self.acqTime)\n self.speciesListText.setText(self.speciesListString)\n self.pIonsText.setText(self.pIons)\n self.pIonsEnergyValue.setText(self.pIonsEnergy)\n self.dataPointsText.setText(str(self.dataPoints))\n except Exception as e:\n self.createPopupMessage('Error', e[0])\n return\n\n def updateDisplayInterface(self, sampleID):\n sampleData = self.dbConn.getSampleMetadata(sampleID)\n sampleSpecies = self.dbConn.getSampleSpecies(sampleID)\n\n # Display sample details on Sample Information tab\n self.displaySampleID.setText(sampleData[0])\n self.displayAnalysisDate.setText(sampleData[2])\n self.displayTotAcqTime.setText(sampleData[3])\n self.displayAnnTemp.setText(str(sampleData[4]))\n self.displayAnnTime.setText(str(sampleData[5]))\n self.displayGasComp.setText(sampleData[6])\n self.displayCoolingMethod.setText(sampleData[7])\n self.displayMatrixComp.setText(sampleData[8])\n self.displaySputtRate.setText(str(sampleData[9]))\n self.displayPriIon.setText(sampleData[10])\n self.displayPriIonEnergy.setText(str(sampleData[11]))\n self.displayAddNotes.setText(sampleData[12])\n self.displayDataPoints.setText(str(sampleData[13]))\n\n speciesList = \"\"\n for 
specie in sampleSpecies:\n speciesList += specie[0] + \", \"\n \n self.displaySpecies.setText(speciesList[:-2])\n \n def sampleSelected(self):\n # Get Sample ID\n sampleID = self.samplesList.currentItem().text()\n \n self.updateDisplayInterface(sampleID)\n\n # Add sample species to list on extract tab \n self.normList.clear()\n self.specieList.clear()\n for specie in self.dbConn.getSampleSpecies(sampleID):\n self.normList.addItem(specie[0])\n self.specieList.addItem(specie[0])\n\n def openDataFile(self):\n try:\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n dataFile = open(filename[0], 'rt')\n # Check if datafile is valid\n if not dataFile.readline().startswith('*** DATA FILES ***'):\n self.createPopupMessage('Error', 'Invalid Data File')\n return\n self.isFileOpen = True\n except FileNotFoundError:\n # A file was not chosen or it was wrongly selected so do nothing\n self.createPopupMessage(\"Error\", \"Please select an existing data file\")\n return\n \n try:\n # Extract data from data file\n for line in dataFile:\n if line.startswith('Sample ID'):\n self.sampleID = line.split()[-1]\n if line.startswith('Analysis date'):\n self.analysisDate = line.split()[-1]\n if line.startswith('*** DATA START ***'):\n simsdata = self.extractSimsData(dataFile)\n if line.startswith('Total acquisition time (s)'):\n self.acqTime = int(line.split()[-1])\n self.acqTime = str(timedelta(seconds=self.acqTime))\n if line.startswith(\"*** MEASUREMENT CONDITIONS\"):\n speciesList = self.extractSpeciesList(dataFile)\n self.header = []\n self.speciesListString = \"\"\n for specie in speciesList:\n self.speciesListString += specie + \", \"\n self.header.extend([f'time-{specie}', f'count-{specie}'])\n self.speciesListString = self.speciesListString[0:-2]\n if line.startswith(\"Primary ions\"):\n self.pIons = line.split()[-1]\n if line.startswith(\"Impact energy\"):\n self.pIonsEnergy = line.split()[-1]\n \n self.updateInputInterface()\n \n # Create pandas data frame\n 
self.df = pd.read_csv(simsdata, header=None, delim_whitespace=True, skip_blank_lines=True)\n simsdata.close()\n self.df.columns = self.header\n \n except Exception as e:\n self.createPopupMessage('Error', 'Error extracting information from File: ' + e.args[0])\n\n finally:\n dataFile.close()\n\n # Save Data File and User Input to SQLite Database\n def saveInputData(self):\n msg = \"Successfully saved data to database\"\n try:\n # Do nothing if file has not been opened\n if self.isFileOpen is not True:\n return\n # Pass necessary data to insert functions\n self.dbConn.insertSampleData(self.sampleID, self.df.to_json(), self.analysisDate, self.acqTime, self.annTemp.value(), self.annTime.value(), self.gasComp.currentText(), self.coolingMethod.currentText(), self.matrixComp.currentText(), self.sputtRate.value(), self.pIons, self.pIonsEnergy, self.addNotes.toPlainText(), self.dataPoints)\n self.dbConn.insertAnnealingTemp(self.annTemp.value())\n self.dbConn.insertCoolingMethod(self.coolingMethod.currentText())\n self.dbConn.insertGasComp(self.gasComp.currentText())\n self.dbConn.insertMatrixComp(self.matrixComp.currentText())\n\n species = self.speciesListString.split()\n for specie in species:\n specie = specie.strip(',')\n self.dbConn.insertSpecies(specie)\n self.dbConn.insertIntSpecies(self.sampleID, specie)\n\n # Commit changes to database\n self.dbConn.dbCommit()\n except Exception as e:\n msg = e.args[0]\n finally: \n self.createPopupMessage('Message', msg)\n \n self.updateExtractInterface()\n\n def filterSamples(self):\n # Get list of samples filtered by species\n speciesQuery = \"\"\n if len(self.filterSpeciesList.selectedItems()) > 0:\n for specie in self.filterSpeciesList.selectedItems():\n if specie.text() == \"\":\n next\n speciesQuery += \"specie = \\\"\" + specie.text() + \"\\\" OR \"\n samplesList1 = self.dbConn.getSamplesWithSpecies(speciesQuery[:-4])\n\n metadataQuery = \"\"\n\n # Add Annealing Temperature filters to query\n tempList = 
self.filterAnnTempsList.selectedItems()\n if len(tempList) > 0:\n metadataQuery += \"(\"\n for temp in tempList:\n if temp.text() == \"0.0\":\n next\n metadataQuery += \"annealingTemp = \" + temp.text() + \" OR \"\n metadataQuery = metadataQuery[:-4] + \") AND \"\n # Add Cooling Method filters to query\n methodList = self.filterCoolingMethodList.selectedItems()\n if len(methodList) > 0:\n metadataQuery += \"(\"\n for method in methodList:\n if method.text() == \"\":\n next\n metadataQuery += \"coolingMethod = \\\"\" + method.text() + \"\\\" OR \"\n metadataQuery = metadataQuery[:-4] + \") AND \"\n gasList = self.filterGasCompList.selectedItems()\n # Add Gas Composition filters to query\n if len(gasList) > 0:\n metadataQuery += \"(\"\n for gas in gasList:\n if gas.text() == \"\":\n next\n metadataQuery += \"gasComposition = \\\"\" + gas.text() + \"\\\" OR \"\n metadataQuery = metadataQuery[:-4] + \") AND \"\n matrixList = self.filterMatrixCompList.selectedItems()\n # Add Matrix Composition filters to query\n if len(matrixList) > 0:\n metadataQuery += \"(\"\n for matrix in matrixList:\n if matrix.text() == \"\":\n next\n metadataQuery += \"matrixComposition = \\\"\" + matrix.text() + \"\\\" OR \"\n metadataQuery = metadataQuery[:-4] + \")\"\n else:\n metadataQuery = metadataQuery[:-5]\n \n samplesList2 = self.dbConn.getSamplesWithMetadata(metadataQuery)\n\n # Combine Sample lists and add to GUI list\n if speciesQuery == \"\" and metadataQuery == \"\":\n return\n elif speciesQuery == \"\":\n self.samplesList.clear()\n for sample in samplesList2:\n self.samplesList.addItem(sample[0])\n elif metadataQuery == \"\":\n self.samplesList.clear()\n for sample in samplesList1:\n self.samplesList.addItem(sample[0])\n else:\n self.samplesList.clear()\n for sample in set(samplesList1) and set(samplesList2):\n self.samplesList.addItem(sample[0])\n\n def createPopupMessage(self, title=\"Title\", msg=\"Message\"):\n QtWidgets.QMessageBox.about(self, title, msg)\n\n def 
extractData(self):\n # Check if necessary items have been selected\n sample = self.samplesList.selectedItems()\n normSpecies = self.normList.selectedItems()\n outputSpecies = self.specieList.selectedItems()\n if len(sample) == 0:\n self.createPopupMessage(\"Error\", \"Select a sample to extract data from\")\n return\n elif len(normSpecies) == 0 or len(self.normList.selectedItems()) > 2:\n self.createPopupMessage(\"Error\", \"Select 1 or 2 normalisation species\")\n return\n elif len(outputSpecies) == 0:\n self.createPopupMessage(\"Error\", \"Select species to output\")\n return\n\n sampleID = sample[0].text()\n simsData = pd.read_json(self.dbConn.getSimsData(sampleID)[0])\n\n # Get all columns for time\n time = simsData.columns[simsData.columns.str.startswith('time')]\n\n # Calculate and store depth for each row\n depthDF = pd.DataFrame({'Depth (nm)':simsData[time].mean(axis=1)})\n depthDF['Depth (nm)'] = depthDF['Depth (nm)'] * float(self.displaySputtRate.text())\n\n # Calculate normalization factor\n # Create dataframe with 1 column \n \n # 1 species selected\n if len(normSpecies) == 1:\n normDF = pd.DataFrame({'Normalization Factor':simsData['count-'+normSpecies[0].text()]})\n # 2 species selected\n elif len(normSpecies) == 2:\n normDF = pd.DataFrame({'Normalization Factor':simsData['count-'+normSpecies[0].text()]*simsData['count-'+normSpecies[1].text()]})\n\n # Get list of every species count\n speciesList1 = []\n speciesList2 = []\n fileNameStr = \"\"\n\n # Every species in sample \n species = simsData.columns[simsData.columns.str.startswith('count')]\n for s in species:\n speciesList1.append(s)\n # Every species selected \n for specie in outputSpecies:\n speciesList2.append('count-'+specie.text())\n fileNameStr += '-' + specie.text()\n \n # List of every selected species in sample\n speciesList = list(set(speciesList1) & set(speciesList2))\n \n speciesDF = simsData[speciesList]\n\n for specie in speciesList:\n speciesDF[specie] = speciesDF[specie] / 
normDF['Normalization Factor']\n\n # Merge depth dataframe and species dataframe\n processedDF = pd.merge(depthDF, speciesDF, left_index=True, right_index=True)\n\n # Allow user to select where to save file\n try:\n fName = sampleID + \"_\" + fileNameStr + '.csv'\n filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', fName)\n processedDF.to_csv(filename[0])\n self.createPopupMessage(\"Success\", \"Saved file successfully\")\n except:\n processedDF.to_csv(fName)\n self.createPopupMessage('Error', 'Saved File elsewhere' + fName)\n\n\n def closeApp(self):\n self.dbConn.dbClose()\n sys.exit(0)\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = MainUI()\napp.exec_()\napp.aboutToQuit(window.closeApp())" }, { "alpha_fraction": 0.6281294226646423, "alphanum_fraction": 0.6288380026817322, "avg_line_length": 40.11165237426758, "blob_id": "5476e94580889f6277e9cccc72a79c7cda56544b", "content_id": "c0a41bc395dae2956feb4014f174511487293e79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8468, "license_type": "permissive", "max_line_length": 538, "num_lines": 206, "path": "/sims/db_utils.py", "repo_name": "Gimb0/KS1911-SIMS", "src_encoding": "UTF-8", "text": "import sqlite3\nimport os\n\n# Add option to open a database\nDEFAULT_PATH = os.path.join(os.path.dirname(__file__), 'simsdatastore.sqlite3')\n\nclass dbUtils():\n def __init__(self):\n if not os.path.exists(DEFAULT_PATH) and not os.path.isfile(DEFAULT_PATH):\n self.createDB()\n else:\n self.con = self.dbConnect()\n self.cur = self.con.cursor()\n\n def dbConnect(self, dbPath=DEFAULT_PATH):\n return sqlite3.connect(dbPath)\n\n def dbClose(self):\n self.cur.close()\n self.con.close()\n\n def dbCommit(self):\n self.con.commit()\n\n # Create SQLite3 DB here\n def createDB(self):\n self.con = self.dbConnect()\n self.cur = self.con.cursor()\n \n # Create Sample Data Table\n sampleMDTable = \"\"\"CREATE TABLE sampleData (\n sampleID TEXT PRIMARY 
KEY,\n simsData BLOB,\n acquisitionDate TEXT,\n totalAcquisitionTime TEXT,\n annealingTemp REAL,\n annealingTime REAL,\n gasComposition TEXT,\n coolingMethod TEXT,\n matrixComposition TEXT,\n sputteringRate REAL,\n primaryIon TEXT,\n primaryIonEnergy REAL,\n additionalNotes TEXT,\n dataPoints INTEGER\n ) \"\"\"\n self.cur.execute(sampleMDTable)\n\n matrixCompTable = \"\"\"CREATE TABLE matrixCompositions (\n matrix TEXT PRIMARY KEY\n )\"\"\"\n self.cur.execute(matrixCompTable)\n\n # Create Gas Composition Table\n gasCompTable = \"\"\"CREATE TABLE gasCompositions (\n gas TEXT PRIMARY KEY\n )\"\"\"\n self.cur.execute(gasCompTable)\n\n # Create Cooling Method Table\n coolingMethodTable = \"\"\"CREATE TABLE coolingMethod (\n method TEXT PRIMARY KEY\n )\"\"\"\n self.cur.execute(coolingMethodTable)\n\n # Create Annealing Temperature Table\n annealingTempTable = \"\"\"CREATE TABLE annealingTemp (\n temperature REAL PRIMARY KEY\n )\"\"\"\n self.cur.execute(annealingTempTable)\n\n # Create Species Table\n speciesTable = \"\"\"CREATE TABLE species (\n specie TEXT PRIMARY KEY\n )\"\"\"\n self.cur.execute(speciesTable)\n\n # Create intermediate species table\n intSpeciesTable = \"\"\"CREATE TABLE intSpecies (\n sampleID TEXT,\n specie TEXT NOT NULL,\n FOREIGN KEY (specie) REFERENCES species(specie)\n )\"\"\"\n self.cur.execute(intSpeciesTable)\n \n # Insert to or Update the sampleData Table\n def insertSampleData(self, sampleID=None, sampleData=None, acquisitionDate=None, totalAcquistionTime=None, annealingTemp=None, annealingTime=None, gasComposition=None, coolingMethod=None, matrixComposition=None, sputteringRate=None, primaryIon=None, primaryIonEnergy=None, additionalNotes=None, dataPoints=None):\n # Sample ID must not be None, everything else can be\n if sampleID == None:\n return\n\n # Check if row exists with current sampleID\n self.cur.execute(\"\"\"SELECT sampleID FROM sampleData WHERE sampleID = ?\"\"\", (sampleID, ))\n data = self.cur.fetchone()\n # Insert row if 
doesn't exist\n if data == None:\n self.cur.execute(\"INSERT INTO sampleData (sampleID, simsData, acquisitionDate, totalAcquisitionTime, annealingTemp, annealingTime, gasComposition, coolingMethod, matrixComposition, sputteringRate, primaryIon, primaryIonEnergy, additionalNotes, dataPoints ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (sampleID, sampleData, acquisitionDate, totalAcquistionTime, annealingTemp, annealingTime, gasComposition, coolingMethod, matrixComposition, sputteringRate, primaryIon, primaryIonEnergy, additionalNotes, dataPoints))\n # Update row if does exist\n else:\n self.cur.execute(\"\"\"UPDATE sampleData SET annealingTemp = ?, annealingTime = ?, gasComposition = ?, coolingMethod = ?, matrixComposition = ?, sputteringRate = ?, additionalNotes = ? WHERE sampleID = ?\"\"\", (annealingTemp, annealingTime, gasComposition, coolingMethod, matrixComposition, sputteringRate, additionalNotes, sampleID))\n \n def insertGasComp(self, gasComposition):\n if gasComposition == \"\":\n return\n self.cur.execute(\"\"\"SELECT gas FROM gasCompositions WHERE gas = ?\"\"\", (gasComposition, ))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"INSERT INTO gasCompositions (gas) VALUES (?)\", (gasComposition, ))\n\n def insertCoolingMethod(self, coolingMethod):\n if coolingMethod == \"\":\n return\n self.cur.execute(\"\"\"SELECT method FROM coolingMethod WHERE method = ?\"\"\", (coolingMethod, ))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"INSERT INTO coolingMethod (method) VALUES (?)\", (coolingMethod, ))\n\n def insertAnnealingTemp(self, temperature):\n if temperature == 0.0:\n return\n self.cur.execute(\"\"\"SELECT * FROM annealingTemp WHERE temperature = ?\"\"\", (temperature, ))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"INSERT INTO annealingTemp (temperature) VALUES (?)\", (temperature, ))\n\n def insertMatrixComp(self, matrix):\n if matrix == \"\":\n return\n 
self.cur.execute(\"\"\"SELECT * FROM matrixCompositions WHERE matrix = ?\"\"\", (matrix, ))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"\"\"INSERT INTO matrixCompositions (matrix) VALUES (?)\"\"\", (matrix, ))\n\n def insertSpecies(self, specie):\n if specie == \"\":\n return\n self.cur.execute(\"\"\"SELECT * FROM species WHERE specie = ?\"\"\", (specie, ))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"INSERT INTO species (specie) VALUES (?)\", (specie, )) \n\n def insertIntSpecies(self, sampleID, specie):\n if sampleID is None or specie is None:\n return\n self.cur.execute(\"\"\"SELECT * FROM intSpecies WHERE sampleID = ? AND specie = ?\"\"\", (sampleID, specie))\n data = self.cur.fetchone()\n if data is None:\n self.cur.execute(\"INSERT INTO intSpecies (sampleID, specie) VALUES (?,?)\", (sampleID, specie))\n\n def getSamples(self):\n self.cur.execute(\"\"\"SELECT sampleID FROM sampleData\"\"\")\n return self.cur.fetchall()\n\n def getSpecies(self):\n self.cur.execute(\"\"\"SELECT specie from species\"\"\")\n return self.cur.fetchall()\n\n def getAnnealingTemps(self):\n self.cur.execute(\"\"\"SELECT temperature FROM annealingTemp\"\"\")\n return self.cur.fetchall()\n\n def getAnnealingTime(self, sampleID):\n self.cur.execute(\"\"\"SELECT annealingTime FROM sampleData WHERE sampleID = ?\"\"\", (sampleID, ))\n return self.cur.fetchone()\n\n def getCoolingMethod(self):\n self.cur.execute(\"\"\"SELECT method FROM coolingMethod\"\"\")\n return self.cur.fetchall()\n\n def getGasComposition(self):\n self.cur.execute(\"\"\"SELECT gas FROM gasCompositions\"\"\")\n return self.cur.fetchall()\n \n def getMatrixComposition(self):\n self.cur.execute(\"\"\"SELECT matrix FROM matrixCompositions\"\"\")\n return self.cur.fetchall()\n\n def getSputteringRate(self, sampleID):\n self.cur.execute(\"\"\"SELECT sputteringRate FROM sampleData WHERE sampleID = ?\"\"\", (sampleID, ))\n return self.cur.fetchone()\n\n def getSampleSpecies(self, 
sampleID):\n self.cur.execute(\"\"\"SELECT specie FROM intSpecies WHERE sampleID = ?\"\"\", (sampleID, ))\n return self.cur.fetchall()\n\n def getSimsData(self, sampleID):\n self.cur.execute(\"\"\"SELECT simsData FROM sampleData WHERE sampleID = ?\"\"\", (sampleID, ))\n return self.cur.fetchone()\n\n def getSampleMetadata(self, sampleID):\n self.cur.execute(\"\"\"SELECT * FROM sampleData WHERE sampleID = ?\"\"\", (sampleID, ))\n return self.cur.fetchone()\n\n def getSamplesWithSpecies(self, filterQuery):\n if filterQuery == \"\":\n return []\n query = \"\"\"SELECT DISTINCT sampleID FROM intSpecies WHERE \"\"\" + filterQuery\n self.cur.execute(query)\n return self.cur.fetchall()\n\n def getSamplesWithMetadata(self, filterQuery):\n if filterQuery == \"\":\n return []\n query = \"\"\"SELECT sampleID FROM sampleData WHERE \"\"\" + filterQuery\n self.cur.execute(query)\n return self.cur.fetchall()" } ]
3
JacquesdeH/NER-CCF2020-HeapOverflow
https://github.com/JacquesdeH/NER-CCF2020-HeapOverflow
f3ccecd92bd43a05a8e20759c59a01b1452012f9
69ffe3ea91b6a79cc8ad0dcda789b6ca9806614b
2d33346d82c51955ec3244d4525a7e53285abd84
refs/heads/master
2023-02-13T00:25:55.854953
2021-01-08T13:30:33
2021-01-08T13:30:33
310,582,810
1
2
null
2020-11-06T11:47:19
2021-01-08T13:12:42
2021-01-08T13:30:33
Python
[ { "alpha_fraction": 0.4353707432746887, "alphanum_fraction": 0.5200400948524475, "avg_line_length": 35.30908966064453, "blob_id": "9e9e015798ef51712af208ce445ef3c9295927fa", "content_id": "adb2dd133ddc13967a01f9f8081b28aa575d6bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 96, "num_lines": 55, "path": "/core/preprocessor/divider.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..utils import alloc_logger\n\nclass Divider:\n def __init__(self, max_size: int):\n self.max_size = max_size\n self.base_index = max_size\n self.logger = alloc_logger(\"preprocessor.log\", Divider)\n\n def check(self, data: str, ret: list, ch: str) -> bool:\n for i in range(self.max_size):\n index = self.base_index - i\n if data[index - 1] == ch:\n ret.append(index)\n if len(data) - index <= self.max_size:\n return True\n self.base_index = self.max_size + index\n return False\n \n def detect_division(self, data: str) -> list:\n \"\"\"\n @return: ๆฏไธ€ไธช่ขซๆ‹†ๅˆ†็š„ๆฎต็š„่ตทๅง‹็š„ไธ‹ๆ ‡.\n \"\"\"\n if len(data) <= self.max_size:\n return [0]\n ret = [0]\n self.base_index = self.max_size\n while True:\n if self.check(data, ret, 'ใ€‚'):\n if len(data) - ret[-1] <= self.max_size:\n return ret\n else:\n continue\n if self.check(data, ret, '๏ผŒ'):\n if len(data) - ret[-1] <= self.max_size:\n return ret\n else:\n continue\n if self.check(data, ret, '.'):\n if len(data) - ret[-1] <= self.max_size:\n return ret\n else:\n continue\n ret.append(self.base_index)\n self.base_index = self.max_size + self.base_index\n if len(data) - ret[-1] <= self.max_size:\n return ret\n\nif __name__ == \"__main__\":\n divider = Divider(5)\n sample1 = \"01234567890123456789012345678901234567890123456789012345678901234567890123456789\"\n result1 = divider.detect_division(sample1)\n divider.logger.log_message(result1)\n sample2 = 
\"0ใ€‚2345678ใ€‚012345678901ใ€‚3456789012๏ผŒใ€‚5678901234567๏ผŒ901234567890123๏ผŒ567890123456789\"\n result2 = divider.detect_division(sample2)\n divider.logger.log_message(result2)" }, { "alpha_fraction": 0.5274975299835205, "alphanum_fraction": 0.53048175573349, "avg_line_length": 42.98125076293945, "blob_id": "9b4d8fdf8cb5e0a45cf0d001e6fb0f622afa913c", "content_id": "a6150de67ecd510bdc4ab5a58aa9082c5840dc57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7037, "license_type": "no_license", "max_line_length": 121, "num_lines": 160, "path": "/core/Instructor.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from .config.DefaultConfig import DefaultConfig as config\nfrom .dataloader.dataloader import CCFDataloader\nfrom .dataloader.dataloader import KFold\nimport torch\nimport torch.nn as nn\nimport os\nfrom .utils import alloc_logger\nfrom core.model.Net import Net\n\nimport json\nfrom Main import args\nfrom tqdm import tqdm\nfrom transformers import get_linear_schedule_with_warmup\n\n\nclass TempModule(nn.Module):\n def __init__(self):\n super(TempModule, self).__init__()\n self.count = 0\n\n def forward(self, words):\n self.count += 1\n return torch.zeros((config.HYPER.BATCH_SIZE, config.HYPER.SEQ_LEN, config.HYPER.LABEL_DIM))\n\n\nclass Instructor:\n def __init__(self, model_name, args):\n self.model_name = model_name\n self.args = args\n self.model = Net(self.model_name, self.args).to(self.args.device)\n self.train_log = alloc_logger('train_log')\n pass\n\n def get_loss_fn(self, reduce=None, size_average=None):\n return self.model.neg_log_likelihood_loss\n\n def get_optimizer(self, model: Net, lr=1e-3):\n # torch.optim.AdamW\n return torch.optim.Adam([{'params': model.queryParameters('base'), 'lr': self.args.base_lr},\n {'params': model.queryParameters('lstm'), 'lr': self.args.lr},\n {'params': model.queryParameters('dense'), 'lr': self.args.lr}])\n\n def 
get_scheduler(self, optimizer, rate, tot_iters):\n return get_linear_schedule_with_warmup(optimizer, num_warmup_steps=rate * tot_iters,\n num_training_steps=tot_iters)\n\n def save_module(self):\n print('Saving model...')\n mdl_path = os.path.join(config.PATHS.CKPT, self.model_name)\n torch.save(self.model.state_dict(), mdl_path)\n print('Successfully saved model.')\n\n def load_module(self):\n mdl_path = os.path.join(config.PATHS.CKPT, self.model_name)\n self.model.load_state_dict(torch.load(mdl_path))\n print('Loaded from trained model.')\n\n '''\n return batch_size and learning_rate\n data_content: list: batch_size\n label_content: [batch_size, seq_len] \n '''\n\n def train(self):\n n_time = self.args.n\n k_fold = self.args.k\n train_log = alloc_logger(\"train.log\", \"train\")\n train_log.log_message(\"train at n_time: %d, k_fold: %d\" % (n_time, k_fold))\n dataloader = CCFDataloader(args=self.args, in_train=True)\n loss_fn = self.get_loss_fn()\n optimizer = self.get_optimizer(self.model)\n # schedule = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)\n k_fold = KFold(dataloader=dataloader, k=k_fold)\n # for time in range(n_time):\n # total_loss = 0.\n # for fold in range(len(k_fold)):\n # trainloader = k_fold.get_train()\n # for data_content, label_content in tqdm(trainloader):\n # # label_predict = self.model(data_content)\n # loss = loss_fn(data_content, label_content)\n #\n # optimizer.zero_grad()\n # loss.backward()\n # optimizer.step()\n # print('loss={:}', format(loss.detach().cpu().item()))\n #\n # validloader = k_fold.get_valid()\n # cnt_sample = 0\n # for data_content, label_content in tqdm(validloader):\n # with torch.no_grad():\n # # label_predict = self.model(data_content)\n # loss = loss_fn(data_content, label_content)\n # total_loss += loss.sum().item()\n # cnt_sample += len(data_content)\n # print('==============================================')\n # print('Valid loss={:}'.format(total_loss/cnt_sample))\n # 
print('==============================================')\n #\n # k_fold.next_fold()\n # k_fold.new_k_fold()\n # train_log.log_message('total loss: %d' % total_loss)\n # loss_history.append(total_loss)\n trainloader = k_fold.get_train()\n tot_iters = k_fold.get_train_len()\n # scheduler = self.get_scheduler(optimizer, 0.1, tot_iters)\n optimizer.zero_grad()\n cumul_batch = 0\n for data_content, label_content in tqdm(trainloader):\n for _ in range(10):\n label_predict = self.model(data_content)\n print('predict:\\n' + str(label_predict))\n print('labels:\\n' + str(label_content))\n print('predict_all_max=', label_predict.max())\n batch_size = len(data_content)\n loss = loss_fn(data_content, label_content)\n\n loss_each = loss / self.args.cumul_batch\n loss_each.backward()\n cumul_batch += 1\n\n # for name_, params_ in self.model.newfc.named_parameters():\n # print(' ---Name:', name_, '---RequiresGrad:', params_.requires_grad, '---GradValue:', params_.grad)\n\n if cumul_batch >= self.args.cumul_batch:\n optimizer.step()\n # scheduler.step()\n optimizer.zero_grad()\n cumul_batch = 0\n print(\n 'loss={:} lr={:}'.format(loss.detach().cpu().item() / batch_size, optimizer.param_groups[0]['lr']))\n self.train_log.log_message('loss={:}'.format(loss.detach().cpu().item() / batch_size))\n\n validloader = k_fold.get_valid()\n cnt_sample = 0\n total_loss = 0.\n for data_content, label_content in tqdm(validloader):\n with torch.no_grad():\n # label_predict = self.model(data_content)\n loss = loss_fn(data_content, label_content)\n total_loss += loss.item()\n cnt_sample += len(data_content)\n print('==============================================')\n print('Valid loss={:}'.format(total_loss / cnt_sample))\n print('==============================================')\n self.train_log.log_message('Valid loss={:}'.format(total_loss / cnt_sample))\n\n def genTestJson(self):\n with torch.no_grad():\n dataloader = CCFDataloader(args=self.args, in_train=False)\n file_count = 0\n for 
data_contents in dataloader:\n predicts = self.model(data_contents)\n predicts = predicts.cpu() # [batch_size, seq_len, label_dim]\n for predict_count in range(predicts.shape[0]):\n predict = predicts[predict_count]\n result = list(map(lambda x: int(x.item()), list(predict)))\n with open(os.path.join(config.PATHS.DATA_CCF_CLEANED,\n 'test/label/%d.json' % (file_count + predict_count)), 'w') as fw:\n json.dump(result, fw)\n file_count += predicts.shape[0]\n" }, { "alpha_fraction": 0.5847831964492798, "alphanum_fraction": 0.5882887840270996, "avg_line_length": 35.68095397949219, "blob_id": "2b2367be8b2766eec2d4f18e2bac9402090b029d", "content_id": "bed091e78288d6f30c260816b93461b7b8ab061d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8980, "license_type": "no_license", "max_line_length": 125, "num_lines": 210, "path": "/core/utils/logger.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import time\nimport sys\nimport os\nimport json\nfrom ..config.DefaultConfig import DefaultConfig\n\n\n\n_default_logger = None\ntotal_file_reference = 0\ntotal_file = None\nfile_pool = {} # filename: (file, reference)\n\n\nclass Logger:\n \"\"\"\n ๆ—ฅๅฟ—็ฑป.\n ๅ„ไธชheadๅ‚ๆ•ฐ้ƒฝๅฏไปฅๆ˜ฏstr, ๆˆ–่€…ๆ˜ฏๆ‹ฅๆœ‰__name__ๅฑžๆ€ง็š„ๅฏน่ฑก(ๅฆ‚็ฑปๅ, ๅ‡ฝๆ•ฐๅ).\n ๆฏๆกๆ—ฅๅฟ—็š„ๆ ผๅผไธบ: [<time_stampe>]\\\\t@head\\\\tq\n \"\"\"\n\n def __init__(self, \n log_file_name: str, \n default_head: str or \"__name__\", \n default_mid: str, \n console_output: bool):\n \n # ๅผ•็”จๅ…จๅฑ€ๆ—ฅๅฟ—ๆ–‡ไปถ\n global total_file_reference\n global total_file\n total_file_reference += 1\n if total_file is None or total_file.closed:\n total_file = open(os.path.join(DefaultConfig.PATHS.LOG, \"total.log\"), 'a', encoding='utf8')\n \n global file_pool\n if log_file_name in file_pool:\n self._log_file = file_pool[log_file_name][0] \n file_pool[log_file_name][1] += 1\n # print(\"using log_file in pool [\", 
log_file_name, ']')\n else:\n self._log_file = open(os.path.join(DefaultConfig.PATHS.LOG, log_file_name), 'a', encoding='utf8')\n file_pool[log_file_name] = [self._log_file, 1]\n # print(\"opening new log_file [\", log_file_name, ']')\n\n self._log_file_name = log_file_name\n self.console_output = console_output\n self._default_mid = default_mid\n self._default_signature = self.format_signature(default_head) if default_head is not None else None\n\n def __del__(self):\n # print(\"deleting logger [file=\", self._log_file_name, \", head=\", self._default_signature, \"]\")\n\n global file_pool\n file_pool[self._log_file_name][1] -= 1\n if file_pool[self._log_file_name][1] == 0:\n self._log_file.write(\"\\n\\n\\n\")\n self._log_file.close()\n # print(\"closing log_file [\", self._log_file_name, ']')\n del file_pool[self._log_file_name]\n\n # ่งฃ้™คๅ…จๅฑ€ๆ—ฅๅฟ—ๆ–‡ไปถ็š„ๅผ•็”จ\n global total_file_reference\n global total_file\n \n total_file_reference -= 1\n if total_file_reference == 0:\n total_file.close()\n # print(\"closing total_log_file\")\n \n\n \n @staticmethod\n def get_time_stampe():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n \n @staticmethod\n def get_fs_legal_time_stampe():\n return time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.localtime(time.time()))\n\n @staticmethod\n def format_signature(signature) -> str or None:\n if isinstance(signature, str):\n return signature\n else:\n return signature.__name__\n\n def format_msg(self, *msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, end:str='\\n'):\n \"\"\"\n ๅฝ“ head ไธŽ mid ไธบ None ๆ—ถ, ๅฐ†ไฝฟ็”จๅˆ›ๅปบๆœฌloggerๆ—ถๆŒ‡ๅฎš็š„้ป˜่ฎคๅ€ผ, ่‹ฅ้ป˜่ฎคๅ€ผไปไธบNone, ๅˆ™ไธบ็ฉบๅญ—็ฌฆไธฒ.\n ไธไผšๅœจ็ป“ๅฐพๆทปๅŠ  '\\\\n'.\n\n @param *msg: ไธ€ๅˆ—ๅฏไปฅ้€š่ฟ‡str()่ฝฌๆขไธบๅญ—็ฌฆไธฒ็š„ๅฏน่ฑก, ๅฐ†้€š่ฟ‡midๅฑžๆ€ง่ฟžๆŽฅ;\n @param head: ๅคด้ƒจ, ไปฅ @xxx ๅฝขๅผๆทปๅŠ ๅˆฐๆ—ถ้—ดๆˆณไน‹ๅŽ, head้œ€่ฆๆ˜ฏไธ€ไธชๅญ—็ฌฆไธฒๆˆ–่€…ๆ‹ฅๆœ‰__name__ๅฑžๆ€ง็š„ๅฏน่ฑก;\n @param mid: ่ฟžๆŽฅ msg 
ๅ„ไธชๅ†…ๅฎน็š„่ฟžๆŽฅ็ฌฆ;\n \"\"\"\n time_stampe = self.get_time_stampe()\n total_msg = '[' + time_stampe + ']\\t'\n if head != None:\n fmt_signature = self.format_signature(head)\n total_msg += '@' + fmt_signature + ':\\t'\n elif self._default_signature != None:\n total_msg += '@' + self._default_signature + ':\\t'\n if mid is None:\n content = self._default_mid.join(str(m) for m in msg)\n else:\n content = mid.join(str(m) for m in msg)\n total_msg += content\n return total_msg\n\n def console_message(self, *msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, end:str='\\n'):\n \"\"\"\n ๅ‘stdout่พ“ๅ‡บไฟกๆฏ.\n ๅฐ†่ฝฌๅ‘ msg, head, mid ่‡ณ format_msg() ๅ‡ฝๆ•ฐ่ฟ›่กŒๆ ผๅผๅŒ–, ๅ…ทไฝ“ๅฆ‚ไธ‹:\n ๅฝ“ head ไธŽ mid ไธบ None ๆ—ถ, ๅฐ†ไฝฟ็”จๅˆ›ๅปบๆœฌloggerๆ—ถๆŒ‡ๅฎš็š„้ป˜่ฎคๅ€ผ, ่‹ฅ้ป˜่ฎคๅ€ผไปไธบNone, ๅˆ™ไธบ็ฉบๅญ—็ฌฆไธฒ\n\n @param *msg: ไธ€ๅˆ—ๅฏไปฅ้€š่ฟ‡str()่ฝฌๆขไธบๅญ—็ฌฆไธฒ็š„ๅฏน่ฑก, ๅฐ†้€š่ฟ‡midๅฑžๆ€ง่ฟžๆŽฅ;\n @param head: ๅคด้ƒจ, ไปฅ @xxx ๅฝขๅผๆทปๅŠ ๅˆฐๆ—ถ้—ดๆˆณไน‹ๅŽ, head้œ€่ฆๆ˜ฏไธ€ไธชๅญ—็ฌฆไธฒๆˆ–่€…ๆ‹ฅๆœ‰__name__ๅฑžๆ€ง็š„ๅฏน่ฑก;\n @param mid: ่ฟžๆŽฅ msg ๅ„ไธชๅ†…ๅฎน็š„่ฟžๆŽฅ็ฌฆ;\n @param end: ็ป“ๅฐพ็š„็ฌฆๅท, ไป…ๅฏนconsoleๅ†…ๅฎนๆœ‰ๆ•ˆ, ๅ†™ๅ…ฅๆ—ฅๅฟ—ๆ–‡ไปถๆ—ถๅฟ…ๅฎšไปฅๅ›ž่ฝฆ็ป“ๅฐพ;\n \"\"\"\n total_msg = self.format_msg(*msg, head=head, mid=mid)\n print(total_msg, end=end)\n\n def file_message(self, *msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, need_total:bool=True):\n \"\"\"\n ๅ‘ๆ—ฅๅฟ—ๆ–‡ไปถๅ†™ๅ…ฅไฟกๆฏ.\n ๅฐ†่ฝฌๅ‘ msg, head, mid ่‡ณ format_msg() ๅ‡ฝๆ•ฐ่ฟ›่กŒๆ ผๅผๅŒ–, ๅ…ทไฝ“ๅฆ‚ไธ‹:\n ๅฝ“ head ไธŽ mid ไธบ None ๆ—ถ, ๅฐ†ไฝฟ็”จๅˆ›ๅปบๆœฌloggerๆ—ถๆŒ‡ๅฎš็š„้ป˜่ฎคๅ€ผ, ่‹ฅ้ป˜่ฎคๅ€ผไปไธบNone, ๅˆ™ไธบ็ฉบๅญ—็ฌฆไธฒ\n\n @param *msg: ไธ€ๅˆ—ๅฏไปฅ้€š่ฟ‡str()่ฝฌๆขไธบๅญ—็ฌฆไธฒ็š„ๅฏน่ฑก, ๅฐ†้€š่ฟ‡midๅฑžๆ€ง่ฟžๆŽฅ;\n @param head: ๅคด้ƒจ, ไปฅ @xxx ๅฝขๅผๆทปๅŠ ๅˆฐๆ—ถ้—ดๆˆณไน‹ๅŽ, head้œ€่ฆๆ˜ฏไธ€ไธชๅญ—็ฌฆไธฒๆˆ–่€…ๆ‹ฅๆœ‰__name__ๅฑžๆ€ง็š„ๅฏน่ฑก;\n @param mid: ่ฟžๆŽฅ msg ๅ„ไธชๅ†…ๅฎน็š„่ฟžๆŽฅ็ฌฆ;\n @param end: ็ป“ๅฐพ็š„็ฌฆๅท, ไป…ๅฏนconsoleๅ†…ๅฎนๆœ‰ๆ•ˆ, 
ๅ†™ๅ…ฅๆ—ฅๅฟ—ๆ–‡ไปถๆ—ถๅฟ…ๅฎšไปฅๅ›ž่ฝฆ็ป“ๅฐพ;\n @param need_total: ๆ˜ฏๅฆ้œ€่ฆๅ‘ๅ…จๅฑ€ๆ—ฅๅฟ—ๆ–‡ไปถๅ†™ๅ…ฅ;\n \"\"\"\n global total_file\n total_msg = self.format_msg(*msg, head=head, mid=mid)\n self._log_file.write(total_msg + '\\n')\n if need_total:\n total_file.write(total_msg + '\\n')\n\n def log_message(self, *msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, end:str='\\n'): \n \"\"\"\n ๅ‘ๆ—ฅๅฟ—ๆ–‡ไปถๅ†™ๅ…ฅไฟกๆฏ, ๅฆ‚ๆžœๅˆ›ๅปบLoggerๆ—ถconsole_outputไธบTrue, ๅˆ™ๅŒๆ—ถๅ‘stdout่พ“ๅ‡บ็›ธๅŒ็š„ไฟกๆฏ.\n ๅฐ†่ฝฌๅ‘ msg, head, mid ่‡ณ format_msg() ๅ‡ฝๆ•ฐ่ฟ›่กŒๆ ผๅผๅŒ–, ๅ…ทไฝ“ๅฆ‚ไธ‹:\n ๅฝ“ head ไธŽ mid ไธบ None ๆ—ถ, ๅฐ†ไฝฟ็”จๅˆ›ๅปบๆœฌloggerๆ—ถๆŒ‡ๅฎš็š„้ป˜่ฎคๅ€ผ, ่‹ฅ้ป˜่ฎคๅ€ผไปไธบNone, ๅˆ™ไธบ็ฉบๅญ—็ฌฆไธฒ\n\n @param *msg: ไธ€ๅˆ—ๅฏไปฅ้€š่ฟ‡str()่ฝฌๆขไธบๅญ—็ฌฆไธฒ็š„ๅฏน่ฑก, ๅฐ†้€š่ฟ‡midๅฑžๆ€ง่ฟžๆŽฅ;\n @param head: ๅคด้ƒจ, ไปฅ @xxx ๅฝขๅผๆทปๅŠ ๅˆฐๆ—ถ้—ดๆˆณไน‹ๅŽ, head้œ€่ฆๆ˜ฏไธ€ไธชๅญ—็ฌฆไธฒๆˆ–่€…ๆ‹ฅๆœ‰__name__ๅฑžๆ€ง็š„ๅฏน่ฑก;\n @param mid: ่ฟžๆŽฅ msg ๅ„ไธชๅ†…ๅฎน็š„่ฟžๆŽฅ็ฌฆ;\n @param end: ็ป“ๅฐพ็š„็ฌฆๅท, ไป…ๅฏนconsoleๅ†…ๅฎนๆœ‰ๆ•ˆ, ๅ†™ๅ…ฅๆ—ฅๅฟ—ๆ–‡ไปถๆ—ถๅฟ…ๅฎšไปฅๅ›ž่ฝฆ็ป“ๅฐพ;\n \"\"\"\n global total_file\n total_msg = self.format_msg(*msg, head=head, mid=mid)\n if self.console_output:\n print(total_msg, end=end)\n self._log_file.write(total_msg + '\\n')\n total_file.write(total_msg + '\\n')\n\n\n\n\ndef alloc_logger(log_file_name: str=None, default_head: str or \"__name__\"=None, default_mid:str='',console_output:bool=True):\n \"\"\"\n ๅˆ›ๅปบไธ€ไธชLogger.\n ๅฝ“log_file_nameไธบ็ฉบๆ—ถ, ๅฐ†่ฟ”ๅ›ž้ป˜่ฎค็š„logger, ่ฏฅloggerๅชๆœ‰ไธ€ไธชๅฎžไพ‹.\n log_file_name ๆ˜ฏ็›ธๅฏนไบŽ log ๆ–‡ไปถๅคน็š„็›ฎๅฝ•\n \"\"\"\n global _default_logger\n if log_file_name == None:\n if _default_logger is None:\n log_file_name = DefaultConfig.LOG.DEFAULT_LOG_DIR\n signature = DefaultConfig.LOG.DEFAULT_HEAD\n mid = DefaultConfig.LOG.DEFAULT_MID\n need_console = DefaultConfig.LOG.DEFAULT_NEED_CONSOLE\n _default_logger = Logger(log_file_name, signature, mid, need_console)\n return 
_default_logger\n ret = Logger(log_file_name, default_head, default_mid, console_output)\n return ret\n \ndef log_message(*msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, end:str='\\n'): \n \"\"\"\n ไปฃ็†้ป˜่ฎคlogger็š„log_message.\n \"\"\"\n alloc_logger().log_message(*msg, head=head, mid=mid, end=end)\n\ndef file_message(*msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None):\n \"\"\"\n file_message.\n \"\"\"\n alloc_logger().file_message(*msg, head=head, mid=mid)\n\ndef console_message(*msg:\"can to str\", head:str or \"__name__\"=None, mid:str=None, end:str='\\n'):\n \"\"\"\n ไปฃ็†้ป˜่ฎคlogger็š„console_message.\n \"\"\"\n alloc_logger().console_message(*msg, head=head, mid=mid, end=end)\n\n\nif __name__ == \"__main__\":\n print(DefaultConfig.PATHS.LOG)\n log_message(\"test\", \"default\", \"log_message\", mid=' ')\n log_message(1, 2, 3, 4, (1,2), mid='\\t')\n logger1 = alloc_logger()\n logger1.log_message(\"test logger\")\n logger1.log_message(\"test string signature\", head=__file__)\n logger1.log_message(\"test class signature\", head=Logger)\n logger1.log_message(\"test func signature\", head=alloc_logger)\n logger1.log_message(\"test\", \" multi\", \" msg\")\n logger1.log_message(\"test\", \"multi\", \"msg\", \"with\", \"mid\", mid=' ')" }, { "alpha_fraction": 0.4825192093849182, "alphanum_fraction": 0.4884701073169708, "avg_line_length": 37.778846740722656, "blob_id": "e602f479da19c055dc0bed471a8bb87256355474", "content_id": "081567a3a42a3445f147dc6c8866dc9f6dd90f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4045, "license_type": "no_license", "max_line_length": 118, "num_lines": 104, "path": "/core/preprocessor/mismatch_detector.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..utils import alloc_logger\nfrom .label_file_reader import LabelFileReader\nfrom .label_file_reader import LabelInfo\nfrom 
..config.DefaultConfig import DefaultConfig\nimport json\nimport os\nfrom ..utils import alloc_logger\n\n\n\nclass MismatchDetector:\n def __init__(self, mismatch_file_dir: str=None):\n self.mismatch_count = 0\n self.fix_count = 0\n self.logger = alloc_logger(\"detectors.log\", MismatchDetector)\n self.mismatch_file_dir = mismatch_file_dir if mismatch_file_dir is not None else DefaultConfig.PATHS.DATA_INFO\n self.reader = LabelFileReader()\n \n \"\"\"\n {\n <ID> : {\n \"solved\": \"<true/false>\",\n \"data\": \"<data>\",\n \"labels\": [\n <str-type LabelInfo>\n ]\n }\n }\n \"\"\"\n self.tactics = None\n with open(self.mismatch_file_dir + \"/mismatch_tactics.json\", 'r', encoding='utf8') as f:\n self.tactics = json.load(f) \n\n def save(self):\n with open(self.mismatch_file_dir + \"/mismatch_tactics.json\", 'w', encoding='utf8') as f:\n json.dump(self.tactics, f, ensure_ascii=False)\n\n # def __del__(self):\n # self.save()\n\n @staticmethod\n def remove_special_char(data: str)->str:\n return data \\\n .replace('\\n', '') \\\n .replace('\\r', '') \\\n .replace(' ', '') \\\n .replace('\\b', '') \\\n .replace('\\v', '') \\\n .replace('\\f', '') \\\n .replace('\\u2028', '') \\\n .replace('\\u2029', '') \\\n .replace('\\u20A0', '') \\\n .replace('\\uFEFF', '')\n\n def fix_mismatch(self, data:str, infos:\"Iterable[LabelInfo]\", remove_spacial=True) -> (str, \"List[LabelInfo]\"):\n new_data = self.remove_special_char(data) if remove_spacial else data\n reader = self.reader\n for no, info in enumerate(infos):\n if new_data[info.Pos_b : info.Pos_e + 1] != info.Privacy:\n self.mismatch_count += 1\n if str(info.ID) in self.tactics.keys():\n tac = self.tactics[str(info.ID)]\n if tac[\"solved\"] == \"true\":\n self.fix_count += 1\n new_new_data = tac[\"data\"]\n new_infos = tac[\"labels\"]\n new_infos = list(map(reader.loads, new_infos))\n # ๆฃ€ๆŸฅๆ˜ฏๅฆๆญฃ็กฎ\n for info in new_infos:\n if new_new_data[info.Pos_b : info.Pos_e + 1] != info.Privacy:\n tac[\"solved\"] = \"false\"\n 
self.logger.log_message(\"mismatch not solve with id={:d}\".format(infos[0].ID))\n return (None, None)\n self.logger.log_message(\"solve a mismatch with id={:d}\".format(infos[0].ID))\n return (new_new_data, new_infos)\n self.logger.log_message(\"detect a mismatch with id={:d}\".format(infos[0].ID))\n msg = {\n \"solved\": \"false\",\n \"data\" : new_data,\n \"labels\" : list(map(reader.dumps, infos)),\n \"no\" : no\n }\n self.tactics[str(info.ID)] = msg\n return (None, None)\n return (new_data, infos)\n\n\nif __name__ == \"__main__\":\n reader = LabelFileReader()\n detector = MismatchDetector()\n\n data_dir = DefaultConfig.PATHS.DATA_CCF_RAW + '/train/data'\n label_dir = DefaultConfig.PATHS.DATA_CCF_RAW + '/train/label'\n\n data_count = len(os.listdir(data_dir))\n\n for i in range(data_count):\n with open(data_dir + \"/{:d}.txt\".format(i), 'r', encoding='utf8') as f:\n data = f.read()\n with open(label_dir + \"/{:d}.csv\".format(i), 'r', encoding='utf8') as f:\n infos = reader.load(f)\n detector.fix_mismatch(data, infos)\n\n detector.save()\n" }, { "alpha_fraction": 0.5251028537750244, "alphanum_fraction": 0.5432098507881165, "avg_line_length": 27.255813598632812, "blob_id": "679835640323d5218b803b7ac4e9e91165214f3b", "content_id": "b2044211160f34f29c90364567c63d56d3e7626e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 74, "num_lines": 43, "path": "/core/config/DefaultConfig.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import os\n\n\nclass DefaultConfig:\n class PATHS:\n IMAGE = os.path.join(\"image\")\n LOG = os.path.join(\"log\")\n CKPT = os.path.join(\"checkpoint\")\n DATA = os.path.join(\"data\")\n DATA_CCF = os.path.join(DATA, os.path.join(\"CCF\"))\n DATA_CCF_RAW = os.path.join(DATA_CCF, os.path.join(\"raw\"))\n DATA_CCF_CLEANED = os.path.join(DATA_CCF, os.path.join(\"cleaned\"))\n DATA_MODULE = 
os.path.join(DATA, os.path.join(\"module\"))\n\n # ็”จไบŽdebugๆ—ถๅญ˜ๆ”พ่พ“ๅ‡บๆ–‡ไปถ็š„่ทฏๅพ„๏ผŒๅผบ็ƒˆๅปบ่ฎฎgitignore\n DATA_CCF_DBG = os.path.join(DATA_CCF, os.path.join(\"debug\"))\n\n DATA_INFO = os.path.join(DATA, os.path.join(\"info\"))\n\n # ๅญ˜ๆ”พๆ ผๅผๅŒ–ๆ ‡็ญพ็š„่ทฏๅพ„\n DATA_CCF_FMT = os.path.join(DATA_CCF, os.path.join(\"formatted\"))\n\n class HYPER:\n PRETRAINED = 'bert-base-chinese'\n BATCH_SIZE = 8\n CUMUL_BATCH = 4\n SEQ_LEN = 256\n EMBED_DIM = 768\n LSTM_HIDDEN = 256\n LSTM_DIRECTS = 2\n LSTM_LAYERS = 2\n LABEL_DIM = 47\n EPOCH = 3\n LR = 1e-4\n BASE_LR = 1e-6\n N = 1\n K = 4\n\n class LOG:\n DEFAULT_LOG_DIR = \"default.log\"\n DEFAULT_HEAD = ''\n DEFAULT_MID = ''\n DEFAULT_NEED_CONSOLE = True\n" }, { "alpha_fraction": 0.5792672634124756, "alphanum_fraction": 0.5831504464149475, "avg_line_length": 46.391998291015625, "blob_id": "618ae5f677f9680d920cb55ff3ae9e45f7138dfd", "content_id": "a1ebf415cbb40ada58d56bd977f8b598def94e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5923, "license_type": "no_license", "max_line_length": 149, "num_lines": 125, "path": "/core/preprocessor/result_formatter.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..utils import alloc_logger\nfrom ..config.DefaultConfig import DefaultConfig\nfrom .label_transformer import LabelTrasformer\nfrom .label_formatter import LabelFormatter\nfrom .label_file_reader import LabelFileReader\nfrom .label_file_reader import LabelInfo\nimport os\nimport json\n\nclass ResultFormatter:\n def __init__(self, split_index_file: str=None, end='\\n'):\n super().__init__()\n \n self.logger = alloc_logger(\"result_formatter.log\", ResultFormatter)\n self.end = end\n self.combine_index = {} # origin: (beg, target)\n split_index_file = split_index_file if split_index_file is not None else os.path.join(DefaultConfig.PATHS.DATA_INFO, \"split_index_test.json\")\n \n 
self.logger.log_message(\"loading split_index from file:\\t\", split_index_file)\n with open(split_index_file, 'r', encoding='utf8') as f:\n m = json.load(f)\n for item in m:\n target = item[\"target\"]\n origin = item[\"origin\"]\n beg = item[\"beg\"]\n if origin in self.combine_index.keys():\n self.combine_index[origin].append((beg, target))\n else:\n self.combine_index[origin] = [(beg, target)]\n\n def combine_all(self, origin_data_count: int=-1, label_dir: str=None, data_dir: str=None):\n signature = \"conbine_all()\\t\"\n\n self.logger.log_message(signature, \"start!\")\n\n label_dir = label_dir if label_dir is not None else os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"test/label\")\n data_dir = data_dir if data_dir is not None else os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"test/data\")\n self.logger.log_message(signature, \"origin data dir:\\t\", data_dir)\n self.logger.log_message(signature, \"origin label dir:\\t\", label_dir)\n\n origin_data_count = origin_data_count if origin_data_count >= 0 else len(os.listdir(os.path.join(DefaultConfig.PATHS.DATA_CCF_RAW, \"test\")))\n self.logger.log_message(signature, \"origin data count:\\t\", origin_data_count)\n\n\n label_formatter = LabelFormatter()\n label_formatter.load_transformer_from_file()\n\n reader = LabelFileReader()\n\n output_csv = open(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"predict_origin.csv\"), 'w', encoding='utf8')\n\n for i in range(origin_data_count):\n # labels = None\n with open(os.path.join(label_dir, \"{:d}.json\".format(i)), 'r', encoding='utf8') as f:\n labels = json.load(f)\n with open(os.path.join(data_dir, \"{:d}.txt\".format(i)), 'r', encoding='utf8') as f:\n data = f.read()\n if i in self.combine_index.keys():\n targets = self.combine_index[i]\n targets.sort(key=lambda t: t[0])\n for _, target in targets:\n with open(os.path.join(label_dir, \"{:d}.json\".format(target)), 'r', encoding='utf8') as f:\n new_labels = json.load(f)\n with open(os.path.join(data_dir, 
\"{:d}.txt\".format(target)), 'r', encoding='utf8') as f:\n new_data = f.read()\n labels += new_labels\n data += new_data\n infos = label_formatter.integer_list_label_and_data_to_infos(ID=i, integer_list=labels, data = data)\n for info in infos:\n string = reader.dumps(info)\n output_csv.write(string + self.end)\n output_csv.close()\n\n def trans_origin_to_raw(self, data_dir: str=None):\n signature = \"trans_origin_to_raw()\\t\"\n\n input_csv = open(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"predict_origin.csv\"), 'r', encoding='utf8')\n output_csv = open(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"predict.csv\"), 'w', encoding='utf8')\n\n reader = LabelFileReader()\n\n data_dir = data_dir if data_dir is not None else os.path.join(DefaultConfig.PATHS.DATA_CCF_RAW, \"test\")\n\n for line in input_csv.readlines():\n info = reader.loads(line)\n content = info.Privacy\n beg = info.Pos_b\n end = info.Pos_e\n ID = info.ID\n with open(os.path.join(data_dir, \"{:d}.txt\".format(ID)), 'r', encoding='utf8') as f:\n raw_content = f.read()\n new_beg = raw_content.find(content, max(beg - 5, 0), min(end + 5, len(raw_content)))\n if new_beg < 0:\n self.logger.log_message(signature, \"in:\\t\", content in raw_content)\n # print(max(beg - 5, 0))\n # print(min(end + 5, len(raw_content)))\n print(len(raw_content))\n # self.logger.log_message(signature, \"content:\\t\", content)\n self.logger.log_message(signature, \"raw_content:\\t\", raw_content)\n self.logger.log_message(signature, \"content not found in raw:\\t\", reader.dumps(info))\n continue\n new_end = new_beg + end - beg\n new_info = LabelInfo(\n ID = ID,\n Category = info.Category,\n Pos_b = new_beg,\n Pos_e = new_end,\n Privacy = content\n )\n new_line = reader.dumps(new_info)\n output_csv.write(new_line + self.end)\n\n input_csv.close()\n output_csv.close()\n\nif __name__ == \"__main__\":\n # formatter = ResultFormatter(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"split_index_train.json\"))\n # 
formatter.combine_all(\n # origin_data_count=2515, \n # label_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train/label\"),\n # data_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train/data\"))\n # formatter.trans_origin_to_raw(data_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_RAW, \"train/data\"))\n formatter = ResultFormatter()\n formatter.combine_all()\n formatter.trans_origin_to_raw()" }, { "alpha_fraction": 0.5413929224014282, "alphanum_fraction": 0.5462111234664917, "avg_line_length": 31.542856216430664, "blob_id": "086a351efc23a24d8dbcff627d69ab9941e2c8be", "content_id": "bbf6a8ed741e898dd860a98fbca5513b4fcac8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 118, "num_lines": 70, "path": "/core/preprocessor/label_file_reader.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import re\nimport sys\nfrom collections import namedtuple\nfrom ..utils.logger import alloc_logger\nfrom ..config.DefaultConfig import DefaultConfig\n\n\"\"\"\nnamed_tuple LabelInfo:\nID: int\nCategory: str\nPos_b: int\nPos_e: int\nProvacy: str\n\"\"\"\nLabelInfo = namedtuple(\"LabelInfo\", [\"ID\", \"Category\", \"Pos_b\", \"Pos_e\", \"Privacy\"])\n\nclass LabelFileReader:\n def __init__(self):\n self.csv_re = re.compile(r\"^(\\d+),(\\w+),(\\d+),(\\d+),(.+)$\")\n self.logger = alloc_logger(\"label_file_reader.log\", default_head=LabelFileReader)\n \n def loads(self, line_content) -> LabelInfo:\n m = self.csv_re.match(line_content)\n if m == None:\n self.logger.log_message(\"loads()\\t:\", \"content cannot match pattern:\", line_content)\n return None\n ID = int(m.group(1))\n Category = m.group(2)\n Pos_b = int(m.group(3))\n Pos_e = int(m.group(4))\n Privacy = m.group(5)\n ret = LabelInfo(\n ID = ID,\n Category = Category,\n Pos_b = Pos_b,\n Pos_e = Pos_e,\n Privacy = Privacy\n )\n return ret\n\n\n def 
load(self, fp) -> \"List[LabelInfo]\":\n ret = []\n for row, line_content in enumerate(fp.readlines()):\n if row == 0:\n continue\n if len(line_content) == 0 or line_content == '\\n':\n continue\n new_info = self.loads(line_content)\n if new_info is not None:\n ret.append(new_info)\n self.logger.file_message(\"load():\\t\", \"infos:\\t\", ret)\n return ret\n \n def dumps(self, info: LabelInfo):\n return str(info.ID) + ',' + info.Category + ',' + str(info.Pos_b) + ',' + str(info.Pos_e) + ',' + info.Privacy\n\n def dump(self, infos : \"List[LabelInfo]\", fp):\n header = \"ID,Category,Pos_b,Pos_e,Privacy\"\n fp.write(header + '\\n')\n for info in infos:\n fp.write(self.dumps(info) + '\\n')\n\nif __name__ == \"__main__\":\n reader = LabelFileReader()\n with open(DefaultConfig.PATHS.DATA_CCF_RAW + \"/train/label/0.csv\", 'r', encoding='utf8') as f:\n infos = reader.load(f)\n print(reader.dumps(infos[0]))\n with open(\"label_file_reader.debug\", 'w', encoding='utf8') as f:\n reader.dump(infos, f)\n \n" }, { "alpha_fraction": 0.5610302090644836, "alphanum_fraction": 0.5683890581130981, "avg_line_length": 45.30370330810547, "blob_id": "861ff413a3694f19ef28bd084e98e9a391f14bcc", "content_id": "c83a7a9ba78c5e8a27a4723c09eb83ec97f20156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6327, "license_type": "no_license", "max_line_length": 120, "num_lines": 135, "path": "/core/model/Net.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "# _*_coding:utf-8_*_\n# Author : JacquesdeH\n# Create Time : 2020/11/27 10:30\n# Project Name: NER-CCF2020-HeapOverflow\n# File : Net.py\n# --------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nfrom transformers import AutoTokenizer, AutoModel\nfrom keras.preprocessing.sequence import pad_sequences\nfrom torchcrf import CRF\n\n\nclass Net(nn.Module):\n def __init__(self, model_name, args):\n super(Net, 
self).__init__()\n self.model_name = model_name\n self.args = args\n self.tokenizer = AutoTokenizer.from_pretrained(self.args.pretrained)\n self.pretrain_base = AutoModel.from_pretrained(self.args.pretrained).to(self.args.device)\n self.lstm = nn.LSTM(input_size=self.args.embed_dim, hidden_size=self.args.lstm_hidden,\n num_layers=self.args.lstm_layers, batch_first=True,\n bidirectional=self.args.lstm_directs == 2).to(self.args.device)\n if self.args.lstm_directs:\n self.lstm_directs = 2\n else:\n self.lstm_directs = 1\n\n self.fc1 = nn.Sequential(nn.Linear(in_features=self.lstm_directs * self.args.lstm_hidden,\n out_features=self.lstm_directs * self.args.lstm_hidden),\n # nn.BatchNorm\n nn.ReLU(),\n nn.Linear(in_features=self.lstm_directs * self.args.lstm_hidden,\n out_features=self.lstm_directs * self.args.lstm_hidden),\n # nn.BatchNorm\n nn.ReLU(),\n nn.Linear(in_features=self.lstm_directs * self.args.lstm_hidden,\n out_features=self.args.lstm_hidden),\n nn.Sigmoid()).to(self.args.device)\n\n self.dropout = nn.Dropout(0.4).to(self.args.device)\n\n self.fc2 = nn.Sequential(nn.Linear(in_features=self.args.lstm_hidden, out_features=self.args.lstm_hidden),\n # nn.BatchNorm\n nn.ReLU(),\n nn.Linear(in_features=self.args.lstm_hidden, out_features=self.args.lstm_hidden),\n # nn.BatchNorm\n nn.ReLU(),\n nn.Linear(in_features=self.args.lstm_hidden, out_features=self.args.label_dim),\n nn.Sigmoid()).to(self.args.device)\n\n self.newfc = nn.Sequential(\n nn.Linear(in_features=self.lstm_directs * self.args.lstm_hidden, out_features=self.args.lstm_hidden),\n nn.ReLU(),\n nn.Linear(in_features=self.args.lstm_hidden, out_features=self.args.label_dim)\n ).to(self.args.device)\n\n self.crf = CRF(num_tags=self.args.label_dim, batch_first=True).to(self.args.device)\n\n def pad(self, a, l=128):\n return a + [0] * (l - len(a))\n\n def get_output_score(self, texts: list):\n batch_size = len(texts)\n input_ids = [self.tokenizer.encode(text, add_special_tokens=True, 
max_length=self.args.seq_len, truncation=True)\n for text in texts]\n input_ids = pad_sequences(input_ids, maxlen=self.args.seq_len, dtype=\"long\",\n value=0, truncating=\"post\", padding=\"post\")\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n attention_masks = (input_ids > 0).type(torch.long)\n\n input_ids, attention_masks = input_ids.to(self.args.device), attention_masks.to(self.args.device)\n embeddings, pools = self.pretrain_base(input_ids, attention_mask=attention_masks)\n\n h = torch.randn(self.args.lstm_layers * self.args.lstm_directs, batch_size, self.args.lstm_hidden).to(\n self.args.device)\n c = torch.randn(self.args.lstm_layers * self.args.lstm_directs, batch_size, self.args.lstm_hidden).to(\n self.args.device)\n\n # embeddings -> [batch, seq_len, embed_dim]\n lstm_out, (_, _) = self.lstm(embeddings, (h, c))\n # lstm_out -> [batch, seq_len, lstm_hidden * lstm_directs]\n lstm_out = lstm_out.contiguous().view(-1, self.lstm_directs * self.args.lstm_hidden)\n # fc1_out = self.fc1(lstm_out)\n # fc2_out = self.fc2(self.dropout(fc1_out))\n # fc2_out -> [batch * seq_len, label_dim]\n\n fc2_out = self.newfc(lstm_out)\n\n lstm_emissions = fc2_out.contiguous().view(batch_size, self.args.seq_len, -1)\n # lstm_emissions -> [batch, seq_len, label_dim]\n return lstm_emissions, attention_masks\n\n def forward(self, texts: list):\n lstm_feats, attention_masks = self.get_output_score(texts)\n tag_seq = self.crf.decode(emissions=lstm_feats.float(), mask=attention_masks.bool())\n # scores, tag_seq = self.crf._viterbi_decode(feats=lstm_feats)\n # tag_seq = list(map(self.pad, tag_seq))\n for i in range(len(tag_seq)):\n tag_seq[i] = self.pad(tag_seq[i], self.args.seq_len)\n\n return torch.tensor(tag_seq, dtype=torch.long)\n\n def neg_log_likelihood_loss(self, texts, tags):\n lstm_feats, mask = self.get_output_score(texts)\n lstm_feats, mask = lstm_feats.transpose(0, 1), mask.transpose(0, 1)\n tags = tags.transpose(0, 1)\n\n numerator = 
self.crf._compute_score(lstm_feats, tags, mask.bool())\n # shape: (batch_size,)\n denominator = self.crf._compute_normalizer(lstm_feats, mask.bool())\n # shape: (batch_size,)\n llh = torch.log(numerator) - denominator\n # llh = numerator - denominator\n return -llh.sum() / mask.float().sum()\n # return (denominator - numerator).mean()\n\n def queryParameters(self, layer: str):\n if layer == 'base':\n return self.pretrain_base.parameters()\n elif layer == 'lstm':\n return self.lstm.parameters()\n elif layer == 'dense':\n return self.newfc.parameters()\n\n\nif __name__ == '__main__':\n import sys\n\n sys.path.append('../../')\n from Main import args\n\n net = Net('Baseline', args=args)\n tags = net([\"ไฝ ๆ˜ฏๅ‚ป้€ผๅ—, ๆˆ‘ๅŽป?\", \"ๅพˆ้ซ˜ๅ…ด่งๅˆฐไฝ , ๆˆ‘็š„ๅๅญ—ๆ˜ฏๅฐ่Šฑ, ไป€ไนˆๆ—ถๅ€™ๅ‡บๅŽปๅ–ไธ€ๆฏ\", \"ๅคงๅฎถไธ€่ตทๆ”พๅฑๅฅฝไธๅฅฝ\"])\n" }, { "alpha_fraction": 0.5296617746353149, "alphanum_fraction": 0.5344430804252625, "avg_line_length": 48.06956481933594, "blob_id": "b771f4046154020af9e7f6bceb46b009036b932c", "content_id": "72ad32ef5064712152bf0a5d65c4ed58e9e4fb5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5731, "license_type": "no_license", "max_line_length": 160, "num_lines": 115, "path": "/core/preprocessor/duplication_detector.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import re\nimport os\nimport sys\nfrom ..config.DefaultConfig import DefaultConfig\nfrom .label_file_reader import LabelFileReader\nfrom .label_file_reader import LabelInfo\nfrom ..utils import alloc_logger\nfrom ..utils.logger import Logger\n\n\nclass DuplicationDetector:\n def __init__(self, raw_data_set_dir: str = None, duplication_info_dir: str = None, dup_cleaned_set_dir: str = None, console_output: bool=True):\n self.raw_data_set_dir = raw_data_set_dir if raw_data_set_dir is not None else DefaultConfig.PATHS.DATA_CCF_RAW\n self.duplication_info_dir = duplication_info_dir if 
duplication_info_dir is not None else DefaultConfig.PATHS.DATA_INFO\n self.dup_cleaned_set_dir = dup_cleaned_set_dir if dup_cleaned_set_dir is not None else DefaultConfig.PATHS.DATA_CCF_DBG + \"/duplication_cleaned\"\n self.duplication_list = [] # list of tuple\n self.logger = alloc_logger(\"detectors.log\", DuplicationDetector, console_output=console_output)\n\n def detect(self):\n raw_data_count = len(os.listdir(self.raw_data_set_dir + \"/train/data\"))\n self.logger.log_message(\"detect():\", \"raw data count:\\t\", raw_data_count)\n reader = LabelFileReader()\n with open(self.duplication_info_dir + '/duplication.md', 'w', encoding='utf8') as dupfile:\n dupfile.write(\"# duplications\\n\\n\")\n dup_count = 0\n for i in range(raw_data_count):\n data_file_name = self.raw_data_set_dir + \"/train/data/\" + str(i) + \".txt\"\n label_file_name = self.raw_data_set_dir + \"/train/label/\" + str(i) + \".csv\"\n with open(label_file_name, 'r', encoding='utf8') as f:\n infos = reader.load(f)\n infos.sort(key=lambda info: info.Pos_b)\n # print(infos)\n for j in range(len(infos) - 1):\n if infos[j].Pos_e >= infos[j + 1].Pos_b:\n self.logger.log_message(\"detect():\\t\", \"detect duplication with id={:d}\".format(infos[j].ID))\n dup_count += 1\n dupfile.write(\"---\\n\\n\")\n dupfile.write(\"## {:d}\\n\\n\".format(dup_count))\n dupfile.write(\"ID=[{:d}]\\n\\n\".format(infos[j].ID))\n with open(data_file_name, 'r', encoding='utf8') as f:\n txt = f.read()\n dupfile.write(txt + '\\n\\n')\n dupfile.write(\"- [ ] [{:d}:{:d}]\\t<{:s}>\\t{:s}\\n\\n\".format(infos[j].Pos_b, infos[j].Pos_e, infos[j].Category, infos[j].Privacy) )\n dupfile.write(\"- [ ] [{:d}:{:d}]\\t<{:s}>\\t{:s}\\n\\n\".format(infos[j+1].Pos_b, infos[j+1].Pos_e, infos[j+1].Category, infos[j+1].Privacy))\n # break\n self.logger.log_message(\"detect():\\t\", \"detect \", dup_count, \" duplications\")\n\n @staticmethod\n def auto_clean_judge(infos: \"List[LabelInfo]\") -> \"Set[LabelInfo]\":\n \"\"\"\n 
ๅ†ณ่ฎฎๆถˆ้™คinfosๅˆ—่กจไธญ้‡ๅ ็š„ๆ ‡็ญพ๏ผŒ่ฟ”ๅ›žๅˆคๆ–ญๅบ”ๅฝ“ๅˆ ๅŽป็š„ๆ ‡็ญพใ€‚\n ๅฏนไบŽ้‡ๅ ็š„้ƒจๅˆ†๏ผŒๅฐ†ไฟ็•™่พƒ้•ฟ็š„ไธ€ไธชใ€‚\n \"\"\"\n \n def length(info: LabelInfo)->int:\n return info.Pos_e - info.Pos_b + 1\n \n ret = set()\n infos.sort(key=lambda info: info.Pos_b)\n for i in range(len(infos)):\n j = i + 1\n while infos[i] not in ret and j < len(infos) and infos[i].Pos_e >= infos[j].Pos_b:\n if length(infos[i]) < length(infos[j]):\n ret.add(infos[i])\n else:\n ret.add(infos[j])\n j += 1\n return ret\n\n\n def auto_clean_all(self):\n\n # def length(info: LabelInfo):\n # return info.Pos_e - info.Pos_b + 1\n\n signature = \"auto_clean():\\t\"\n raw_data_count = len(os.listdir(self.raw_data_set_dir + \"/train/data\"))\n self.logger.log_message(\"auto_clean():\", \"raw data count:\\t\", raw_data_count)\n reader = LabelFileReader()\n \n dup_count = 0\n for i in range(raw_data_count):\n clean_data_name = self.dup_cleaned_set_dir + \"/auto_cleaned/\" + str(i) + \".csv\"\n label_file_name = self.raw_data_set_dir + \"/train/label/\" + str(i) + \".csv\"\n with open(label_file_name, 'r', encoding='utf8') as f:\n infos = reader.load(f)\n # infos.sort(key=lambda info: info.Pos_b)\n # print(infos)\n # j = 0\n # while j < len(infos) - 1:\n # if infos[j].Pos_e >= infos[j + 1].Pos_b:\n # self.logger.log_message(signature, \"detect duplication with id={:d}\".format(infos[j].ID))\n # dup_count += 1\n # to_remove = infos[j + 1] if length(infos[j]) > length(infos[j + 1]) else infos[j]\n # self.logger.log_message(signature, \"remove [\", to_remove, \"] from label-set\")\n # infos.remove(to_remove)\n # else:\n # j += 1\n to_remove = self.auto_clean_judge(infos)\n if len(to_remove) != 0:\n dup_count += len(to_remove)\n self.logger.log_message(signature, \"detect {:d} duplication with id={:d}\".format(len(to_remove), infos[0].ID))\n self.logger.log_message(signature, \"removing \", to_remove)\n for info in to_remove:\n infos.remove(info)\n\n with open(clean_data_name, 'w', 
encoding='utf8') as f:\n reader.dump(infos, f)\n self.logger.log_message(signature, \"detect \", dup_count, \" duplications\")\n\n \n \nif __name__ == \"__main__\":\n detector = DuplicationDetector()\n detector.auto_clean_all()\n " }, { "alpha_fraction": 0.5866067409515381, "alphanum_fraction": 0.5908827185630798, "avg_line_length": 45.3020133972168, "blob_id": "5637287cbac9ab4cec9c6dc30a70b22d215238e5", "content_id": "433a2f086385338bdce515c01296464cd2b16203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14312, "license_type": "no_license", "max_line_length": 150, "num_lines": 298, "path": "/core/preprocessor/re_preprocessor.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..config.DefaultConfig import DefaultConfig\nfrom ..utils import alloc_logger\nfrom .duplication_detector import DuplicationDetector\nfrom .label_file_reader import LabelFileReader\nfrom .label_formatter import LabelFormatter\nfrom .mismatch_detector import MismatchDetector\nfrom .label_transformer import LabelTrasformer\nfrom .divider import Divider\nimport os\nimport json\nimport random\n\n\nclass RePreprocessor:\n def __init__(self, origin_dir: str = None, target_dir: str = None):\n super().__init__()\n self.origin_dir = origin_dir if origin_dir is not None else DefaultConfig.PATHS.DATA_CCF_RAW\n self.target_dir = target_dir if target_dir is not None else DefaultConfig.PATHS.DATA_CCF_CLEANED\n self.duplication_detector = DuplicationDetector()\n self.reader = LabelFileReader()\n self.label_formatter = LabelFormatter()\n self.mismatch_detector = MismatchDetector()\n self.logger = alloc_logger(\"re_reprocessor.log\", RePreprocessor)\n self.trasformer = self.label_formatter.fit(self.origin_dir + \"/train/label\")\n\n def produce_train(self, max_size: int = None):\n \"\"\"\n ๅฆ‚ๆžœไธๆŒ‡ๅฎš max_size ๆˆ–ๆŒ‡ๅฎšไธบNone, ๅฐ†ไธไผšๅฏนๅŽŸๅง‹ๆ ทๆœฌ่ฟ›่กŒๅˆ†ๅ‰ฒ, \n ๅฆ‚ๆžœไผ ๅ…ฅไบ†ๆŸไธชๆ•ดๆ•ฐ, 
ๅฐ†ไผšๆŠŠ้•ฟๅบฆๅคงไบŽ max_size ็š„ๆ–‡ๆœฌๆŒ‰็…งๅฐฝๅฏ่ƒฝ้ ๅŽ็š„ๅฅๅทๆ‹†ๅˆ†ไธบ้•ฟๅบฆๅฐไบŽ max_size ็š„่‹ฅๅนฒๆฎต.\n ๆ‹†ๅˆ†็ดขๅผ•ๅฐ†ไฟๅญ˜่‡ณ data/info/split_index_train.json.\n @parm max_size: ๅ•ไธช่พ“ๅ…ฅๆ ทไพ‹็š„ๆœ€ๅคง้•ฟๅบฆ.\n \"\"\"\n signature = \"produce_train():\\t\"\n self.logger.log_message(signature, \"maxsize=\", max_size)\n self.logger.log_message(signature, \"start!\")\n\n origin_data_count = len(os.listdir(self.origin_dir + \"/train/data\"))\n self.logger.log_message(signature, \"origin data count:\\t\", origin_data_count)\n file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_origin_bio.txt\")\n ofs = open(file_name, 'w', encoding='utf8')\n\n reader = LabelFileReader()\n dup_count = 0\n unsolve_mismatch = []\n origin_total_entity_count = 0\n cleaned_entity_count = 0\n\n remove_vx_count = 0\n remove_email_count = 0\n remove_mobile_count = 0\n remove_QQ_count = 0\n\n for i in range(origin_data_count):\n with open(self.origin_dir + \"/train/label/{:d}.csv\".format(i), 'r', encoding='utf8') as f:\n infos = reader.load(f)\n with open(self.origin_dir + \"/train/data/{:d}.txt\".format(i), 'r', encoding='utf8') as f:\n data = f.read()\n\n data, infos = self.mismatch_detector.fix_mismatch(data, infos)\n\n if data is None or infos is None:\n unsolve_mismatch.append(i)\n continue\n origin_total_entity_count += len(infos)\n\n new_infos = [info for info in infos if info.Category != 'vx']\n remove_vx_count += len(infos) - len(new_infos)\n infos = new_infos\n \n new_infos = [info for info in infos if info.Category != 'email']\n remove_email_count += len(infos) - len(new_infos)\n infos = new_infos\n \n new_infos = [info for info in infos if info.Category != 'mobile']\n remove_mobile_count += len(infos) - len(new_infos)\n infos = new_infos\n \n new_infos = [info for info in infos if info.Category != 'QQ']\n remove_QQ_count += len(infos) - len(new_infos)\n infos = new_infos\n\n to_remove = self.duplication_detector.auto_clean_judge(infos)\n\n for 
info in to_remove:\n infos.remove(info)\n dup_count += 1\n self.logger.log_message(signature, \"[{:d}]\\tremoving dup\\t\".format(i), reader.dumps(info))\n\n cleaned_entity_count += len(infos)\n\n labels = self.label_formatter.infos_to_bio_str_list_label(infos, len(data))\n\n for idx, ch in enumerate(data):\n ofs.write(ch + ' ' + labels[idx] + '\\n')\n ofs.write(\"\\n\")\n \n ofs.close()\n self.logger.log_message(signature, \"origin file count={:d}\".format(origin_data_count))\n self.logger.log_message(signature, \"save result in file:\", file_name)\n self.logger.log_message(signature, \"origin entity count=\", origin_total_entity_count)\n self.logger.log_message(signature, \"remove vx \", remove_vx_count, \" times\")\n self.logger.log_message(signature, \"remove email \", remove_email_count, \" times\")\n self.logger.log_message(signature, \"remove mobile \", remove_mobile_count, \" times\")\n self.logger.log_message(signature, \"remove QQ \", remove_QQ_count, \" times\")\n self.logger.log_message(signature, \"remove duplication {:d} times\".format(dup_count))\n self.logger.log_message(signature, \"cleaned entity count=\", cleaned_entity_count)\n self.logger.log_message(signature, \"detect {:d} unsolved mismatch\".format(len(unsolve_mismatch)))\n # self.logger.log_message(signature, \"output file count={:d}\".format(alloc_file_num))\n if len(unsolve_mismatch) != 0:\n self.logger.log_message(signature, \"their ID are:\")\n for unsolve_id in unsolve_mismatch:\n self.logger.log_message(signature, \"\\t{:d}\".format(unsolve_id))\n\n self.mismatch_detector.save()\n self.logger.log_message(signature, \"finish!\")\n\n def divide_train_by_index(self, test_start_index: int):\n signature = \"divide_train_by_index()\\t\"\n origin_file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_origin_bio.txt\")\n train_file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_train_bio.txt\")\n test_file_name = 
os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_test_bio.txt\")\n self.logger.log_message(signature, \"start!\")\n self.logger.log_message(signature, origin_file_name)\n self.logger.log_message(signature, \"\\t|\")\n self.logger.log_message(signature, \"\\t+ -[train]- ->\", train_file_name)\n self.logger.log_message(signature, \"\\t+ -[test ]- ->\", test_file_name)\n with open(origin_file_name, 'r', encoding='utf8') as f:\n samples = f.read().strip().split('\\n\\n')\n train_ofs = open(train_file_name, 'w', encoding='utf8')\n test_ofs = open(test_file_name, 'w', encoding='utf8')\n train_count = 0\n test_count = 0\n total_count = 0\n for i, sample in enumerate(samples):\n sample = sample.replace('\\r', '')\n if i < test_start_index:\n train_ofs.write(sample + \"\\n\\n\")\n train_count += 1\n else:\n test_ofs.write(sample + \"\\n\\n\")\n test_count += 1\n \n total_count += 1\n train_ofs.close()\n test_ofs.close()\n self.logger.log_message(signature, \"train count=\", train_count)\n self.logger.log_message(signature, \"test count=\", test_count)\n self.logger.log_message(signature, \"total count=\", total_count)\n self.logger.log_message(signature, \"finish!\")\n\n\n\n def divide_train(self, train_rate: float=0.8):\n signature = \"divide_train()\\t\"\n origin_file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_origin_bio.txt\")\n train_file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_train_bio.txt\")\n test_file_name = os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train_test_bio.txt\")\n self.logger.log_message(signature, \"start!\")\n self.logger.log_message(signature, origin_file_name)\n self.logger.log_message(signature, \"\\t|\")\n self.logger.log_message(signature, \"\\t+ -[train]- ->\", train_file_name)\n self.logger.log_message(signature, \"\\t+ -[test ]- ->\", test_file_name)\n with open(origin_file_name, 'r', encoding='utf8') as f:\n samples = f.read().strip().split('\\n\\n')\n train_ofs = 
open(train_file_name, 'w', encoding='utf8')\n test_ofs = open(test_file_name, 'w', encoding='utf8')\n train_count = 0\n test_count = 0\n total_count = 0\n for sample in samples:\n sample = sample.replace('\\r', '')\n if random.random() < train_rate:\n train_ofs.write(sample + \"\\n\\n\")\n train_count += 1\n else:\n test_ofs.write(sample + \"\\n\\n\")\n test_count += 1\n \n total_count += 1\n train_ofs.close()\n test_ofs.close()\n self.logger.log_message(signature, \"train count=\", train_count)\n self.logger.log_message(signature, \"test count=\", test_count)\n self.logger.log_message(signature, \"total count=\", total_count)\n self.logger.log_message(signature, \"finish!\")\n\n def produce_test(self, max_size: int = None):\n \"\"\"\n ๅฆ‚ๆžœไธๆŒ‡ๅฎš max_size ๆˆ–ๆŒ‡ๅฎšไธบNone, ๅฐ†ไธไผšๅฏนๅŽŸๅง‹ๆ ทๆœฌ่ฟ›่กŒๅˆ†ๅ‰ฒ, \n ๅฆ‚ๆžœไผ ๅ…ฅไบ†ๆŸไธชๆ•ดๆ•ฐ, ๅฐ†ไผšๆŠŠ้•ฟๅบฆๅคงไบŽ max_size ็š„ๆ–‡ๆœฌๆŒ‰็…งๅฐฝๅฏ่ƒฝ้ ๅŽ็š„ๅฅๅทๆ‹†ๅˆ†ไธบ้•ฟๅบฆๅฐไบŽ max_size ็š„่‹ฅๅนฒๆฎต.\n ๆ‹†ๅˆ†็ดขๅผ•ๅฐ†ไฟๅญ˜่‡ณ data/info/split_index_test.json.\n @parm max_size: ๅ•ไธช่พ“ๅ…ฅๆ ทไพ‹็š„ๆœ€ๅคง้•ฟๅบฆ.\n \"\"\"\n signature = \"produce_test():\\t\"\n self.logger.log_message(signature, \"start!\")\n self.logger.log_message(signature, \"maxsize=\", max_size)\n\n origin_data_count = len(os.listdir(self.origin_dir + \"/test\"))\n alloc_file_num = origin_data_count\n self.logger.log_message(signature, \"origin data count:\\t\", origin_data_count)\n\n if max_size is not None:\n divider = Divider(max_size)\n divide_index = [] # [{\"target\": target_id, \"origin\": origin_id, \"beg\": beg_index}]\n\n count = 0\n for i in range(origin_data_count):\n with open(self.origin_dir + \"/test/{:d}.txt\".format(i), 'r', encoding='utf8') as f:\n data = f.read()\n\n new_data = self.mismatch_detector.remove_special_char(data)\n if len(data) != len(new_data):\n count += 1\n\n data = new_data\n\n # divide & save\n if max_size is not None:\n divide_result = divider.detect_division(data)\n if len(divide_result) > 1:\n 
self.logger.log_message(signature, \"[{:d}]\\tlen={:d}\".format(i, len(data)))\n # self.logger.log_message(signature, \"[{:d}]\\tdivide points:\\t\".format(i), divide_result)\n for j in range(len(divide_result)):\n beg = divide_result[j]\n end = divide_result[j + 1] if j < len(divide_result) - 1 else -1\n target = i\n if j != 0:\n target = alloc_file_num\n alloc_file_num += 1\n divide_index.append({\"target\": target, \"origin\": i, \"beg\": beg})\n if len(divide_result) > 1:\n self.logger.log_message(signature, \"[{:d}]\\t\".format(i),\n \"({:3d}:{:3d})->[{:d}]\".format(beg, end, target))\n with open(self.target_dir + \"/test/data/{:d}.txt\".format(target), 'w', encoding='utf8') as f:\n f.write(data[beg: end])\n\n else:\n with open(self.target_dir + \"/test/data/{:d}.txt\".format(i), 'w', encoding='utf8') as f:\n f.write(data)\n\n if max_size is not None:\n split_index_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"split_index_test.json\")\n self.logger.log_message(signature, \"saving split index in file[\", split_index_file_name, ']')\n with open(split_index_file_name, 'w', encoding='utf8') as f:\n json.dump(divide_index, f)\n self.logger.log_message(signature, \"changed {:d} data files!\".format(count))\n self.logger.log_message(signature, \"finish!\")\n\n\ndef quick_preproduce(max_size: int, train_rate: float=1, origin_dir: str = None, target_dir: str = None, test_start_index: int=-1) -> LabelTrasformer:\n \"\"\"\n ๅฐ่ฃ…ๅฅฝ็š„ๅฟซ้€Ÿ่ฟ›่กŒ้ข„ๅค„็†็š„ๅ‡ฝๆ•ฐ.\n ๅฆ‚ๆžœไธๆŒ‡ๅฎš max_size ๆˆ–ๆŒ‡ๅฎšไธบNone, ๅฐ†ไธไผšๅฏนๅŽŸๅง‹ๆ ทๆœฌ่ฟ›่กŒๅˆ†ๅ‰ฒ, \n ๅฆ‚ๆžœไผ ๅ…ฅไบ†ๆŸไธชๆ•ดๆ•ฐ, ๅฐ†ไผšๆŠŠ้•ฟๅบฆๅคงไบŽ max_size ็š„ๆ–‡ๆœฌๆŒ‰็…งๅฐฝๅฏ่ƒฝ้ ๅŽ็š„ๅฅๅทๆ‹†ๅˆ†ไธบ้•ฟๅบฆๅฐไบŽ max_size ็š„่‹ฅๅนฒๆฎต.\n ๆ‹†ๅˆ†็ดขๅผ•ๅฐ†ๅˆ†ๅˆซไฟๅญ˜่‡ณ data/info/split_index_train.json ๅ’Œ data/info/split_index_test.json.\n @parm max_size: ๅ•ไธช่พ“ๅ…ฅๆ ทไพ‹็š„ๆœ€ๅคง้•ฟๅบฆ.\n \"\"\"\n logger = alloc_logger()\n if target_dir is None:\n try:\n new_dir = DefaultConfig.PATHS.DATA_CCF_CLEANED 
+ \"/test/data\"\n logger.log_message(\"mkdir \" + new_dir)\n os.makedirs(new_dir)\n except FileExistsError:\n logger.log_message(\"has existed\")\n try:\n new_dir = DefaultConfig.PATHS.DATA_CCF_CLEANED + \"/test/label\"\n logger.log_message(\"mkdir \" + new_dir)\n os.makedirs(new_dir)\n except FileExistsError:\n logger.log_message(\"has existed\")\n try:\n new_dir = DefaultConfig.PATHS.DATA_CCF_CLEANED + \"/train/data\"\n logger.log_message(\"mkdir \" + new_dir)\n os.makedirs(new_dir)\n except FileExistsError:\n logger.log_message(\"has existed\")\n try:\n new_dir = DefaultConfig.PATHS.DATA_CCF_CLEANED + \"/train/label\"\n logger.log_message(\"mkdir \" + new_dir)\n os.makedirs(new_dir)\n except FileExistsError:\n logger.log_message(\"has existed\")\n re_reprocessor = RePreprocessor(origin_dir=origin_dir, target_dir=target_dir)\n re_reprocessor.produce_train()\n if test_start_index < 0:\n re_reprocessor.divide_train(train_rate)\n else:\n re_reprocessor.divide_train_by_index(test_start_index=test_start_index)\n re_reprocessor.produce_test(max_size)\n re_reprocessor.trasformer.save_to_file()\n re_reprocessor.trasformer.log_bio_type_to_file()\n return re_reprocessor.trasformer\n\n\nif __name__ == \"__main__\":\n from core.config.DefaultConfig import DefaultConfig as config\n quick_preproduce(max_size=256 - 2, test_start_index=13263)\n" }, { "alpha_fraction": 0.5440647602081299, "alphanum_fraction": 0.5600719451904297, "avg_line_length": 34.42038345336914, "blob_id": "48dbe7145b34c73d34b7879058330a29bd944e88", "content_id": "fd99a9b94be2aa91cbb76cb128eb7521a612db5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5890, "license_type": "no_license", "max_line_length": 139, "num_lines": 157, "path": "/estimator/Estimator.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as 
plt\nimport math\n\nclass Estimators:\n '''\n labelๅˆ†ๅธƒ๏ผš็ปŸ่ฎกๆฏไธชlabelๅ‡บ็Žฐๆ—ถ็š„้•ฟๅบฆ\n ๆ–‡ๆœฌ้•ฟๅบฆๅˆ†ๅธƒ็ปŸ่ฎก:็ปŸ่ฎกๆฏไธชๆ–‡ๆœฌ็š„้•ฟๅบฆ\n ๅ…ณ่”ๅบฆๅˆ†ๆž๏ผš็ปŸ่ฎก็ฑปๅˆซๅœจๆ–‡ไปถไธญ็š„ๅ‡บ็Žฐๆƒ…ๅ†ต\n '''\n def __init__(self,fileset:list):\n self.fileset=fileset\n self.labels=set()\n self.startPrintStatus()\n for i in range(len(self.fileset)):\n df=pd.read_csv('../data/CCF/raw/train//label/'+self.fileset[i]+'.csv')\n self.labels|=set(df.Category.values)\n self.continuePrintStatus(i)\n self.endPrintStatus()\n print(\"INIT DONE.\")\n \n def calculate(self):\n self.appearance=dict()\n self.coinCount=dict()\n self.entityLength4Label=dict()\n self.fileLengths=list()\n self.potentialNoise=list()\n for label in self.labels:\n self.appearance[label]=list()\n self.entityLength4Label[label]=list()\n self.coinCount[label]=dict()\n for label0 in self.labels:\n self.coinCount[label][label0]=0\n self.startPrintStatus()\n for i in range(len(self.fileset)):\n df=pd.read_csv('../data/CCF/raw/train//label/'+self.fileset[i]+'.csv')\n if True in set(df.Privacy.value_counts()>1):\n self.potentialNoise.append(self.fileset[i])\n continue\n for label in self.appearance.keys():\n if label in df.Category.unique():\n self.appearance[label].append(1)\n else:\n self.appearance[label].append(0)\n for j in range(len(df)):\n self.entityLength4Label[df.iloc[j]['Category']].append(df.iloc[j]['Pos_e']-df.iloc[j]['Pos_b']+1)\n uni=df.Category.unique()\n for j in range(len(uni)):\n for k in range(j+1,len(uni)):\n self.coinCount[uni[j]][uni[k]]+=1\n self.coinCount[uni[k]][uni[j]]+=1\n with open('../data/CCF/raw/train//data/'+self.fileset[i]+'.txt','r',encoding='utf8') as f:\n l=0\n for j in f.readlines():\n l+=len(j)\n self.fileLengths.append(l)\n self.continuePrintStatus(i)\n self.endPrintStatus()\n print('CALCULATE DONE')\n \n \n \n def startPrintStatus(self):\n self.lastStatus=0\n print('Process:',end='')\n \n def continuePrintStatus(self,n):\n status=n/len(self.fileset)\n 
status=math.floor(status*10)/10\n if status>self.lastStatus:\n print('โ–‡โ–‡โ–‡โ–‡โ–‡',end='')\n self.lastStatus=status\n \n def endPrintStatus(self):\n self.lastStatus=0\n print('100% completed.')\n\n def showSimilarity(self):\n '''\n ็ฌฌไธ€็ง็›ดๆŽฅ็”ฑๅ…ฑๅŒๅ‡บ็Žฐๆฌกๆ•ฐ่ฎก็ฎ—็ƒญ็‚นๅ›พ\n ็ฌฌไบŒ็งๅฐ†ๅ‡บ็Žฐๆฌกๆ•ฐ้™คไปฅไบŒ่€…็š„ๅ‡ ไฝ•ๅ‡ๅ€ผๅŽๅ†่ฎก็ฎ—็ƒญ็‚นๅ›พ\n ็ฌฌไธ‰็ง่ฎก็ฎ—ไบŒ่€…ๅ‡บ็Žฐๅ‘้‡็š„ไฝ™ๅผฆ็›ธไผผๅบฆ\n '''\n df=pd.DataFrame(self.coinCount)\n #_4sort=df.index.tolist()\n _4sort=['name','position','vx','QQ','email','mobile','movie','book','scene','game','government','address','organization','company']\n df.sort_values(by=_4sort,axis=1,inplace=True)\n df.sort_values(by=_4sort,axis=0,ascending=False,inplace=True)\n df=df.astype('float64')\n plt.figure()\n sns.heatmap(data=df,cmap='OrRd')\n for label0 in _4sort:\n for label1 in _4sort:\n df[label0][label1]=df[label0][label1]/((len(self.entityLength4Label[label0])*len(self.entityLength4Label[label1]))**0.5)\n plt.figure()\n sns.heatmap(data=df,cmap='OrRd')\n sqrtL2=dict()\n for label in _4sort:\n sqrtL2[label]=sum(self.appearance[label])**0.5\n temp=dict()\n for label0 in _4sort:\n temp[label0]=dict()\n for label1 in _4sort:\n mulsum=0\n for i in range(len(self.appearance[label0])):\n mulsum+=self.appearance[label0][i]*self.appearance[label1][i]\n temp[label0][label1]=mulsum/sqrtL2[label0]/sqrtL2[label1]\n df=pd.DataFrame(temp)\n df.sort_values(by=_4sort,axis=1,inplace=True)\n df.sort_values(by=_4sort,axis=0,inplace=True,ascending=False)\n plt.figure()\n sns.heatmap(data=df,cmap='OrRd')\n for label in _4sort:\n df[label][label]=0\n plt.figure()\n sns.heatmap(data=df,cmap='OrRd')\n \n def showLabelDistribution(self):\n '''\n ็ฌฌไธ€็งๆ˜ฏlabelๅ‡บ็Žฐๆฌกๆ•ฐๆŽ’ๅบๅ›พ\n ็ฌฌไบŒ็งๆ˜ฏlabelๆ‰€ๅฏนๅบ”entityๅนณๅ‡้•ฟๅบฆๆŽ’ๅบๅ›พ\n '''\n count1=dict()\n xLabel=list(self.entityLength4Label.keys())\n for label in xLabel:\n count1[label]=len(self.entityLength4Label[label])\n xLabel.sort(key=lambda x:count1[x])\n 
plt.figure()\n sns.barplot(x=xLabel,y=[count1[i] for i in xLabel])\n \n count2=dict()\n for label in xLabel:\n count2[label]=sum(self.entityLength4Label[label])/len(self.entityLength4Label[label])\n xLabel.sort(key=lambda x:count2[x])\n plt.figure()\n sns.barplot(x=xLabel,y=[count2[i] for i in xLabel])\n \n def showFileLength(self):\n '''\n ็ฌฌไธ€ๅผ ๅ›พๆ˜ฏ้•ฟๅบฆๅˆ†ๅธƒๅ›พ\n ็ฌฌไบŒๅผ ๅ›พๆ˜ฏ้€š่ฟ‡ๆ ธๅฏ†ๅบฆไผฐ่ฎก็ฎ—ๅ‡บ็š„ๅˆ†ๅธƒๅ›พ\n '''\n plt.figure()\n sns.distplot(a=self.fileLengths,kde=False)\n plt.figure()\n sns.kdeplot(data=self.fileLengths,shade=True)\n \n\nif __name__=='__main__':\n fileset=[str(i) for i in range(2515)]\n es=Estimators(fileset)\n es.calculate()\n es.showSimilarity()\n es.showLabelDistribution()\n es.showFileLength()" }, { "alpha_fraction": 0.5954364538192749, "alphanum_fraction": 0.5977906584739685, "avg_line_length": 36.060401916503906, "blob_id": "1cff3a25a2724dfa0aa140d78363f122a281866b", "content_id": "3741c0b90d7a4fa26218eb864d8bfae1c28d282a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5522, "license_type": "no_license", "max_line_length": 102, "num_lines": 149, "path": "/core/preprocessor/label_transformer.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import enum\nimport os\nimport json\nfrom ..utils import alloc_logger\nfrom ..config.DefaultConfig import DefaultConfig\nfrom collections import namedtuple\n\nBMESOLabel = namedtuple(\"BMESOLabel\", [\"name\", \"type\"])\n\nclass LabelType(enum.IntEnum):\n O = 0\n B = 1\n M = 2\n E = 3\n S = 4\n\nclass LabelTrasformer:\n def __init__(self, possible_labels: \"Iterable[BMESOLabel]\"):\n self.LABEL_O = BMESOLabel(\"\", LabelType.O)\n self.label_table = []\n self.label_index = {} # k=label: BMESOLabel, v=index: int\n\n self.logger = alloc_logger(\"label_utils.log\",LabelTrasformer)\n self.logger.log_message(\"init(): build index:\")\n\n _set = set(possible_labels)\n self.label_table = 
[label for label in _set if label.type is not LabelType.O] # delete all O\n self.label_table.sort(key=lambda label: label.type)\n self.label_table.sort(key=lambda label: label.name)\n\n self.label_table = [self.LABEL_O] + self.label_table # make O as zero\n for idx, label in enumerate(self.label_table):\n self.label_index[label] = idx\n self.logger.log_message(idx, \"\\t:\\t\", self.label_to_string(label))\n\n def log_bio_type_to_file(self):\n table_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"bio_table.json\")\n\n save_label_table = []\n s = set()\n for label in self.label_table:\n bio_string = self.label_to_bio_string(label)\n if bio_string not in s:\n s.add(bio_string)\n save_label_table.append(bio_string)\n\n with open(table_file_name, 'w', encoding='utf8') as f:\n json.dump(save_label_table, f)\n self.logger.log_message(\"save bio label table in file [\", table_file_name, ']')\n\n def save_to_file(self):\n map_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"trans_map.json\")\n table_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"trans_table.json\")\n\n save_label_table = list(map(self.label_to_string, self.label_table))\n save_label_index = {}\n for k, v in self.label_index.items():\n save_label_index[self.label_to_string(k)] = v\n\n with open(table_file_name, 'w', encoding='utf8') as f:\n json.dump(save_label_table, f)\n self.logger.log_message(\"save label table in file [\", table_file_name, ']')\n with open(map_file_name, 'w', encoding='utf8') as f:\n json.dump(save_label_index, f)\n self.logger.log_message(\"save trans map in file [\", map_file_name, ']')\n\n @staticmethod\n def load_from_file(map_file_name: str = None, table_file_name: str = None):\n if map_file_name is None:\n map_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"trans_map.json\")\n if table_file_name is None:\n table_file_name = os.path.join(DefaultConfig.PATHS.DATA_INFO, \"trans_table.json\")\n\n \n ret = LabelTrasformer([])\n with 
open(table_file_name, 'r', encoding='utf8') as f:\n save_label_table = json.load(f)\n ret.logger.log_message(\"load label table from file [\", table_file_name, ']')\n with open(map_file_name, 'r', encoding='utf8') as f:\n save_label_index = json.load(f)\n ret.logger.log_message(\"load trans map from file [\", map_file_name, ']')\n \n ret.label_table = list(map(ret.string_to_label, save_label_table))\n for k, v in save_label_index.items():\n ret.label_index[ret.string_to_label(k)] = v\n \n return ret\n\n \n\n def label_to_integer(self, label: BMESOLabel) -> int:\n if label.type is LabelType.O:\n return 0\n return self.label_index[label]\n \n def integer_to_label(self, integer: int) -> BMESOLabel:\n return self.label_table[integer]\n\n def label_to_bio_string(self, label: BMESOLabel) -> str:\n if label.type is LabelType.O:\n return \"O\"\n tp = label.type\n if tp == LabelType.M or tp == LabelType.E:\n tp = \"I\"\n elif tp == LabelType.S:\n tp = \"B\"\n else:\n tp = tp.name\n return tp + '-' + label.name\n\n def label_to_string(self, label: BMESOLabel) -> str:\n if label.type is LabelType.O:\n return \"O\"\n return label.type.name + '-' + label.name\n\n def string_to_label(self, string: str) -> BMESOLabel:\n if string == \"O\":\n return self.LABEL_O\n return BMESOLabel(string[2:], LabelType[string[:1]])\n\n\n\nif __name__ == \"__main__\":\n l = BMESOLabel(\"shit\", LabelType.B)\n transformer = LabelTrasformer(\n [\n BMESOLabel(\"shit\", LabelType.B), \n BMESOLabel(\"shit\", LabelType.M),\n BMESOLabel(\"shit\", LabelType.E),\n BMESOLabel(\"fuck\", LabelType.B)\n ]\n )\n print(l)\n print(transformer.label_to_string(l))\n print(transformer.label_to_integer(l))\n ll = BMESOLabel(\"shit\", LabelType.B)\n lll = BMESOLabel(\"shit\", LabelType.M)\n llll = BMESOLabel(\"fuck\", LabelType.B)\n print(transformer.label_to_string(ll))\n print(transformer.label_to_string(lll))\n print(transformer.label_to_string(llll))\n print(transformer.label_to_integer(llll))\n\n l = 
transformer.string_to_label(\"B-shit\")\n print(transformer.label_to_integer(l))\n ll = transformer.string_to_label(\"E-shit\")\n print(transformer.label_to_integer(ll))\n lll = transformer.string_to_label(\"B-fuck\")\n print(transformer.label_to_integer(lll))\n" }, { "alpha_fraction": 0.5824039578437805, "alphanum_fraction": 0.643122673034668, "avg_line_length": 17.340909957885742, "blob_id": "43f34b982fcb7be5356ba6301d903bd4ce365042", "content_id": "e4ae0e7743107a8107eed76f604b9d605081885c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 68, "num_lines": 44, "path": "/README.md", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "# NER-CCF2020-HeapOverflow\nCCF 2020 competition repo for ID=472\n\n## Stage 1 - Midterm Check\n\n### ่ต›้ข˜ไป‹็ป\n - ่ต›้ข˜่ƒŒๆ™ฏๅŠๆ„ไน‰ \n - ไปปๅŠกๅฎšไน‰ๅŠ็›ฎๆ ‡ Formal\n - ่ฏ„ไปทๆŒ‡ๆ ‡\n - [่ต›้ข˜็ฝ‘ๅ€](https://www.datafountain.cn/competitions/472)\n\n### ๆ•ฐๆฎ้›†็ปŸ่ฎก้‡\n - seaborn่ฟ›่กŒ็”ปๅ›พ\n - ๅˆ†ๆžๆฏไธชlabel็š„ๅˆ†ๅธƒ, ไฝœๅ›พๅˆ†ๆž (T1)\n - ๆ–‡ๆœฌ้•ฟๅบฆๅˆ†ๅธƒ็ปŸ่ฎก (T2)\n - ็ฑปๅˆซๅ…ณ่”ๅบฆๅˆ†ๆž, ็ปŸ่ฎกไธ€ไธช็ฑป็š„ๅฎžไฝ“ๅˆ†ๅธƒ็š„ๅˆ†ๆ•ฃๆˆ–่šๆ‹ข็จ‹ๅบฆ (T3)\n\n### ๆจกๅž‹ๆžถๆž„ไป‹็ป\n - ๆ•ดไฝ“ๆจกๅž‹ๅ›พ\n - ๆฏไธ€ๅฑ‚็š„ๅŠŸ่ƒฝ\n - ๆ•ฐๆฎๆต็ฎ€ไป‹\n - ๅผบ่ฐƒๅฏๅฎž็Žฐๆ€ง\n\n### ๆจกๅž‹ไผ˜ๅŒ–\n - ! 
ๆ•ฐๆฎ้›†ๅˆ’ๅˆ†ไฝฟ็”จPๆฌกKๆŠ˜ไบคๅ‰้ชŒ่ฏๆณ•, ๆจกๅž‹่ถ…ๅ‚ๆ•ฐ่ฐƒไผ˜\n - ๅฏนๆŸๅคฑๅ‡ฝๆ•ฐๅŸบไบŽlabelๅˆ†ๅธƒ่ฟ›่กŒ่ฐƒๆ•ด\n - ๆ•ฐๆฎๅขžๅผบๆ้ซ˜ๆจกๅž‹ๆณ›ๅŒ–ๆ€ง่ƒฝ\n - ่€ƒ่™‘ๆ‹“ๅฑ•ๆ•ฐๆฎ้›†\n\n### ๅˆ†ๅทฅ่ฎกๅˆ’\n - ไฝœๅ›พ, ๅคง่‡ดๆ ‡่ฎฐ้œ€่ฆๅนฒไป€ไนˆไบ‹\n - ๆ•ฐๆฎ้ข„ๅค„็† (DXY) (ๅˆ ้™คๅผ‚ๅธธๆ•ฐๆฎ, ๆ•ฐๆฎ็ปŸ่ฎก้‡, labelๆ ผๅผๅŒ–)\n - ๆจกๅž‹ๅฎž็Žฐ (HZX, KJS)\n - ่ฎญ็ปƒ่ฟ‡็จ‹ๆŽงๅˆถ (LMX)\n - ๆจกๅž‹ไผ˜ๅŒ– (ALL) ๅ…ทไฝ“ไผ˜ๅŒ–ๅฎž็Žฐ้œ€่ฆ่ง†ๆƒ…ๅ†ต่€Œๅฎš\n - Timeline\n - 11.8 ๆตทๆŠฅไธŽPPTๅˆถไฝœ\n - 11.20 ๆจกๅž‹ๅฎž็Žฐ็ฌฌไธ€็จฟๅฎŒๆˆ\n - 12.6 ๆจกๅž‹ๆ€ง่ƒฝไผ˜ๅŒ–\n\n### ่กฅๅ……่ฏดๆ˜Ž\n - 11.8 12:00 KJSๆจกๅž‹ๆžถๆž„ๅŠREFERENCE HZXไผ˜ๅŒ–ๅ’Œๅˆ†ๅทฅไป‹็ป LMXๅฎŒๆˆๆ•ฐๆฎๆบ็ปŸ่ฎก้‡ๅŠ็ป˜ๅ›พ DXY่ต›้ข˜ไป‹็ป้ƒจๅˆ†\n - 11.8 23:59 DXY ๅฎŒๆˆPPTๅˆถไฝœ\n - 11.9 10:00 ๅผ€ไผš่ฎจ่ฎบๆตทๆŠฅๅˆถไฝœ\n" }, { "alpha_fraction": 0.4785642921924591, "alphanum_fraction": 0.4810568392276764, "avg_line_length": 39.119998931884766, "blob_id": "0a1d8f9688ddf48d973a4d71489590a67d128f5f", "content_id": "59c2d6070da2d01d0fa4e3fca8d0659c2448827f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10302, "license_type": "no_license", "max_line_length": 121, "num_lines": 250, "path": "/core/preprocessor/label_formatter.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import json\nimport os\nfrom ..config.DefaultConfig import DefaultConfig\nfrom .label_file_reader import LabelFileReader\nfrom .label_file_reader import LabelInfo\nfrom ..utils import alloc_logger\nfrom .label_transformer import BMESOLabel\nfrom .label_transformer import LabelType\nfrom .label_transformer import LabelTrasformer\nfrom collections import Counter\n\n\nclass LabelFormatter:\n def __init__(self,\n data_dir=None,\n label_dir=None,\n target_dir=None,\n console_output: bool=True\n ):\n self.data_dir = data_dir if data_dir is not None else DefaultConfig.PATHS.DATA_CCF_RAW + \"/train/data\"\n self.label_dir = label_dir if label_dir is not None else 
DefaultConfig.PATHS.DATA_CCF_RAW + \"/train/label\"\n self.target_dir = target_dir if target_dir is not None else DefaultConfig.PATHS.DATA_CCF_FMT + \"/label\"\n self.logger = alloc_logger(\"label_utils.log\", LabelFormatter,console_output=console_output)\n self.logger.log_message(\"creating - - - - - - - - - - - - - - - - - - -\")\n self.logger.log_message(\"data_dir=\", self.data_dir)\n self.logger.log_message(\"label_dir=\", self.label_dir)\n self.logger.log_message(\"target_dir=\", self.target_dir)\n self.logger.log_message(\"end - - - - - - - - - - - - - - - - - - - - -\")\n self.transformer = None\n\n\n def fit(self, label_dir: str=None) -> LabelTrasformer:\n label_dir = label_dir if label_dir is not None else self.label_dir\n origin_data_count = len(os.listdir(label_dir))\n self.logger.log_message(\"fit():\", \"label file count:\\t\", origin_data_count)\n reader = LabelFileReader()\n label_set = set() # set[BMESOLabel]\n label_set.add(BMESOLabel(\"\", LabelType.O))\n\n for i in range(origin_data_count):\n with open(label_dir + \"/{:d}.csv\".format(i), 'r', encoding='utf8') as f:\n infos = reader.load(f)\n\n for info in infos:\n type_name = info.Category\n start_index = info.Pos_b\n end_index = info.Pos_e\n if end_index - start_index > 1:\n label_set.add(BMESOLabel(type_name, LabelType.B))\n label_set.add(BMESOLabel(type_name, LabelType.M))\n label_set.add(BMESOLabel(type_name, LabelType.E))\n elif start_index == end_index: \n label_set.add(BMESOLabel(type_name, LabelType.S))\n else:\n label_set.add(BMESOLabel(type_name, LabelType.B))\n label_set.add(BMESOLabel(type_name, LabelType.E))\n\n self.transformer = LabelTrasformer(label_set)\n return self.transformer\n\n def load_transformer_from_file(self, file_name: str=None):\n self.transformer = LabelTrasformer.load_from_file(file_name)\n \n\n def infos_to_integer_list_label(self, infos: \"Iterable[LabelInfo]\", length: int) -> \"List[int]\":\n lst = [0] * length\n for info in infos:\n type_name = 
info.Category\n start_index = info.Pos_b\n end_index = info.Pos_e\n\n # ๅ•ๅญ—\n if start_index == end_index: \n lst[start_index] = self.transformer.label_to_integer(BMESOLabel(type_name, LabelType.S)) # ๆ ‡่ฎฐๅ•ๅญ—็Ÿญ่ฏญ\n continue\n\n # ๅคšๅญ—\n m_sym = self.transformer.label_to_integer(BMESOLabel(type_name, LabelType.M)) # ๅ่ฏ็Ÿญ่ฏญไธญ้—ด็š„ๆ ‡่ฎฐ\n for i in range(start_index, end_index + 1):\n if i == start_index:\n lst[i] = self.transformer.label_to_integer(BMESOLabel(type_name, LabelType.B)) # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ๅผ€ๅคด\n continue\n if i == end_index:\n lst[i] = self.transformer.label_to_integer(BMESOLabel(type_name, LabelType.E)) # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„็ป“ๅฐพ\n continue\n lst[i] = m_sym # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ไธญ้—ด้ƒจๅˆ†\n return lst\n\n def bio_str_list_label_and_token_list_to_infos(self, \n ID: int, \n bio_str_list:\"Iterable[str]\", \n tokens:\"Iterable[str]\", \n receive_rate:float) -> \"List[LabelIndo]\":\n ret = []\n reading = False\n counting = None\n beg = 0\n content = \"\"\n for i in range(len(bio_str_list)):\n label = bio_str_list[i]\n token = tokens[i].replace(\"##\", '')\n if reading:\n if label[0] in ('B', 'O'):\n counter = Counter(counting)\n most, times = counter.most_common(1)[0]\n if (times / len(counting) >= receive_rate):\n ret.append(LabelInfo(\n ID = ID,\n Category = most,\n Pos_b = beg,\n Pos_e = len(content) - 1,\n Privacy = content[beg:]\n ))\n if label[0] == 'I':\n counting.append(label[2:])\n if label[0] == 'O':\n reading = False\n if label[0] == 'B':\n counting = [label[2:]]\n beg = len(content)\n reading = True\n content += token\n if reading:\n counter = Counter(counting)\n most, times = counter.most_common(1)[0]\n if (times / len(counting) >= receive_rate):\n ret.append(LabelInfo(\n ID = ID,\n Category = most,\n Pos_b = beg,\n Pos_e = len(content) - 1,\n Privacy = content[beg:]\n ))\n return ret\n\n def integer_list_label_and_data_to_infos(self, ID: int, integer_list:\"Iterable[int]\", data:str) -> \"List[LabelIndo]\":\n 
label_list = [None] * len(integer_list)\n ret = []\n for i in range(len(integer_list)):\n label_list[i] = self.transformer.integer_to_label(integer_list[i])\n\n reading = False\n name = \"\"\n beg = 0\n for row, label in enumerate(label_list):\n if reading:\n if label.type == LabelType.E and label.name == name:\n new_info = LabelInfo(\n ID = ID,\n Category = name,\n Pos_b = beg,\n Pos_e = row,\n Privacy = data[beg: row+1]\n )\n ret.append(new_info)\n continue\n if label.type == LabelType.M and label.name == name:\n continue\n if label.type == LabelType.B:\n name = label.name\n beg = row\n continue\n reading = False\n else:\n # ! reading\n if label.type == LabelType.B:\n name = label.name\n beg = row\n reading = True\n continue\n if label.type == LabelType.S:\n new_info = LabelInfo(\n ID = ID,\n Category = label.name,\n Pos_b = row,\n Pos_e = row,\n Privacy = data[row: row+1]\n )\n ret.append(new_info)\n reading = False\n continue\n reading = False\n return ret\n\n def infos_to_bio_str_list_label(self, infos: \"Iterable[LabelInfo]\", length: int) -> \"List[str]\":\n lst = [\"O\"] * length\n for info in infos:\n type_name = info.Category\n start_index = info.Pos_b\n end_index = info.Pos_e\n\n # ๅคšๅญ—\n m_sym = \"I-\" + type_name # ๅ่ฏ็Ÿญ่ฏญไธญ้—ด็š„ๆ ‡่ฎฐ\n for i in range(start_index, end_index + 1):\n if i == start_index:\n lst[i] = \"B-\" + type_name # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ๅผ€ๅคด\n continue\n lst[i] = m_sym # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ไธญ้—ดๅ’Œ็ป“ๅฐพ้ƒจๅˆ†\n return lst\n\n def infos_to_str_list_label(self, infos: \"Iterable[LabelInfo]\", length: int) -> \"List[str]\":\n lst = [\"O\"] * length\n for info in infos:\n type_name = info.Category\n start_index = info.Pos_b\n end_index = info.Pos_e\n\n # ๅ•ๅญ—\n if start_index == end_index: \n lst[start_index] = \"S-\" + type_name # ๆ ‡่ฎฐๅ•ๅญ—็Ÿญ่ฏญ\n continue\n\n # ๅคšๅญ—\n m_sym = \"M-\" + type_name # ๅ่ฏ็Ÿญ่ฏญไธญ้—ด็š„ๆ ‡่ฎฐ\n for i in range(start_index, end_index + 1):\n if i == start_index:\n lst[i] = \"B-\" + 
type_name # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ๅผ€ๅคด\n continue\n if i == end_index:\n lst[i] = \"E-\" + type_name # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„็ป“ๅฐพ\n continue\n lst[i] = m_sym # ๆ ‡่ฎฐๅ่ฏ็Ÿญ่ฏญ็š„ไธญ้—ด้ƒจๅˆ†\n return lst\n\n def format_all(self):\n origin_data_count = len(os.listdir(self.data_dir))\n self.logger.log_message(\"format_all():\", \"origin data count:\\t\", origin_data_count)\n reader = LabelFileReader()\n\n for i in range(origin_data_count):\n with open(self.label_dir + \"/{:d}.csv\".format(i), 'r', encoding='utf8') as f:\n infos = reader.load(f)\n with open(self.data_dir + \"/{:d}.txt\".format(i), 'r', encoding='utf8') as f:\n length = len(f.read())\n\n lst = self.infos_to_str_list_label(infos, length)\n\n # ไฟๅญ˜ๆ ‡่ฎฐๅˆ—่กจ\n with open(self.target_dir + \"/{:d}.json\".format(i), 'w', encoding='utf8') as f:\n json.dump(lst, f)\n \n\nif __name__ == \"__main__\":\n formatter = LabelFormatter(\n data_dir=None,\n label_dir=DefaultConfig.PATHS.DATA_CCF_DBG + \"/duplication_cleaned\",\n target_dir=None,\n console_output=True\n )\n formatter.format_all()\n" }, { "alpha_fraction": 0.74609375, "alphanum_fraction": 0.7467448115348816, "avg_line_length": 39.394737243652344, "blob_id": "4f3e872a1ab5dfa171d0e8730932c2caf7372b37", "content_id": "b9c8414811d62dec6562dafebe85e5a2948f065e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "no_license", "max_line_length": 88, "num_lines": 38, "path": "/Main.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "import argparse\nimport torch\nimport warnings\n\nfrom core.config.DefaultConfig import DefaultConfig as config\nfrom core import Instructor\n\nwarnings.filterwarnings('ignore')\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--cuda', action='store_true')\nparser.add_argument('--pretrained', default=config.HYPER.PRETRAINED)\nparser.add_argument('--batch_size', default=config.HYPER.BATCH_SIZE, 
type=int)\nparser.add_argument('--seq_len', default=config.HYPER.SEQ_LEN, type=int)\nparser.add_argument('--embed_dim', default=config.HYPER.EMBED_DIM, type=int)\nparser.add_argument('--lstm_hidden', default=config.HYPER.LSTM_HIDDEN, type=int)\nparser.add_argument('--lstm_layers', default=config.HYPER.LSTM_LAYERS, type=int)\nparser.add_argument('--lstm_directs', default=config.HYPER.LSTM_DIRECTS, type=int)\nparser.add_argument('--label_dim', default=config.HYPER.LABEL_DIM)\nparser.add_argument('--epoch', default=config.HYPER.EPOCH)\nparser.add_argument('--lr', default=config.HYPER.LR)\nparser.add_argument('--n', default=config.HYPER.N)\nparser.add_argument('--k', default=config.HYPER.K)\nparser.add_argument('--cumul_batch', default=config.HYPER.CUMUL_BATCH)\nparser.add_argument('--base_lr', default=config.HYPER.BASE_LR)\n\n\nargs = parser.parse_args()\n\nargs.device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')\n\nif __name__ == '__main__':\n instructor = Instructor.Instructor('Version8', args)\n instructor.train()\n instructor.save_module()\n # instructor.load_module()\n # instructor.genTestJson()\n\n" }, { "alpha_fraction": 0.5094632506370544, "alphanum_fraction": 0.5149717330932617, "avg_line_length": 40.282798767089844, "blob_id": "707324c102363e0f30c42b6ab135540e71c837b5", "content_id": "240fff7793228ae9991e9f498f6b01f9710a9858", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14170, "license_type": "no_license", "max_line_length": 125, "num_lines": 343, "path": "/core/preprocessor/re_result_formatter.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..utils import alloc_logger\nfrom ..config.DefaultConfig import DefaultConfig\nfrom .label_transformer import LabelTrasformer\nfrom .label_formatter import LabelFormatter\nfrom .label_file_reader import LabelFileReader\nfrom .label_file_reader import LabelInfo\nimport os\nimport 
json\nimport re\n\n\nclass ReResultFormatter:\n def __init__(self, split_index_file: str = None, end='\\n'):\n super().__init__()\n\n self.logger = alloc_logger(\n \"re_result_formatter.log\", ReResultFormatter)\n self.end = end\n email_pattern = r\"[a-zA-Z0-9_\\.]+@[a-zA-PR-Z0-9_\\.]+\"\n mobile_pattern = r\"[\\+๏ผ‹]?([\\(๏ผˆ]\\d+[\\)๏ผ‰])?\\d[\\-\\d]*\\d([่ฝฌ่ฝ‰]\\d+)?\"\n QQ_pattern = r\"\\d{9,10}\"\n date_pattern = r\"([12]\\d{3}\\-\\d{1,2}-\\d{1,2})|([12]\\d{3}\\-[12]\\d{3})\"\n self.email_re = re.compile(email_pattern)\n self.mobile_re = re.compile(mobile_pattern)\n self.QQ_re = re.compile(QQ_pattern)\n self.date_re = re.compile(date_pattern)\n self.combine_index = {} # origin: (beg, target)\n split_index_file = split_index_file if split_index_file is not None else os.path.join(\n DefaultConfig.PATHS.DATA_INFO, \"split_index_test.json\")\n self.logger.log_message(\n \"loading split_index from file:\\t\", split_index_file)\n\n with open(split_index_file, 'r', encoding='utf8') as f:\n m = json.load(f)\n for item in m:\n target = item[\"target\"]\n origin = item[\"origin\"]\n beg = item[\"beg\"]\n if origin in self.combine_index.keys():\n self.combine_index[origin].append((beg, target))\n else:\n self.combine_index[origin] = [(beg, target)]\n\n def combine_all(self, receive_rate: float = 0.8, result_dir: str = None):\n signature = \"conbine_all()\\t\"\n\n self.logger.log_message(signature, \"start!\")\n\n result_dir = result_dir if result_dir is not None else os.path.join(\n DefaultConfig.PATHS.DATA_CCF_CLEANED, \"test/label\")\n self.logger.log_message(signature, \"origin result dir:\\t\", result_dir)\n\n def file_num_to_tokens_and_labels(num: int) -> (list, list):\n with open(os.path.join(result_dir, \"{:d}.json\".format(num)), 'r', encoding='utf8') as f:\n result = json.load(f)\n return result[\"data\"][1:-1], result[\"tags\"][1:-1]\n\n origin_result_count = len(os.listdir(\n os.path.join(DefaultConfig.PATHS.DATA_CCF_RAW, \"test\")))\n 
self.logger.log_message(\n signature, \"origin result count:\\t\", origin_result_count)\n\n label_formatter = LabelFormatter()\n label_formatter.load_transformer_from_file()\n\n reader = LabelFileReader()\n\n output_csv = open(os.path.join(\n DefaultConfig.PATHS.DATA_INFO, \"predict_origin.csv\"), 'w', encoding='utf8')\n\n label_token_len_mismatch = []\n for i in range(origin_result_count):\n # labels = None\n tokens, labels = file_num_to_tokens_and_labels(i)\n if i in self.combine_index.keys():\n targets = self.combine_index[i]\n targets.sort(key=lambda t: t[0])\n for _, target in targets:\n new_tokens, new_labels = file_num_to_tokens_and_labels(\n target)\n labels += new_labels\n tokens += new_tokens\n if len(tokens) != len(labels):\n label_token_len_mismatch.append(i)\n continue\n infos = label_formatter.bio_str_list_label_and_token_list_to_infos(\n ID=i, bio_str_list=labels, tokens=tokens, receive_rate=receive_rate)\n for info in infos:\n string = reader.dumps(info)\n output_csv.write(string + self.end)\n output_csv.close()\n\n self.logger.log_message(\"mismatches:\\t\", label_token_len_mismatch)\n\n def detect_email(self, ID: int, data: str) -> \"List[LabelInfo]\":\n signature = \"detect_email()\\t\"\n ret = []\n for m in self.email_re.finditer(data):\n ret.append(LabelInfo(\n ID=ID,\n Category=\"email\",\n Pos_b=m.start(),\n Pos_e=m.end() - 1,\n Privacy=m.group()\n ))\n # if len(ret) != 0:\n # self.logger.log_message(signature, \"[{:d}] found {:d} emails by re\".format(ID, len(ret)))\n # self.logger.log_message(ret)\n return ret\n\n def detect_mobile_and_QQ(self, ID: int, data: str) -> \"List[LabelInfo]\":\n signature = \"detect_mobile_and_QQ()\\t\"\n ret = []\n for m in self.mobile_re.finditer(data):\n content = m.group()\n if len(content) < 8:\n continue\n count = 0\n num_list = [str(i) for i in range(10)]\n for ch in content:\n if ch in num_list:\n count += 1\n if count > 15:\n continue\n if self.date_re.fullmatch(content):\n continue\n clz = \"mobile\"\n 
if self.QQ_re.fullmatch(content):\n clz = \"QQ\"\n ret.append(LabelInfo(\n ID=ID,\n Category=clz,\n Pos_b=m.start(),\n Pos_e=m.end() - 1,\n Privacy=m.group()\n ))\n # if len(ret) != 0:\n # self.logger.log_message(signature, \"[{:d}] found {:d} QQ/mobile by re\".format(ID, len(ret)))\n # self.logger.log_message(ret)\n return ret\n\n def _print_infos_to_csv_for_id(self, ID: int, infos: list, csv_ofs, data_dir: str, detect_email=True) -> (int, int, int):\n with open(os.path.join(data_dir, \"{:d}.txt\".format(ID)), 'r', encoding='utf8') as f:\n raw_content = f.read()\n reader = LabelFileReader()\n signature = \"_print_infos_to_csv_for_id()\\t\"\n not_in_raw_count = 0\n head = 0\n to_print_infos = []\n for info in infos:\n content = info.Privacy.replace(',', '').replace('\\n', '')\n\n current_content = raw_content[head:].lower()\n if content in current_content:\n new_beg = head + current_content.find(content)\n new_end = new_beg + len(content) - 1\n head = new_end + 1\n to_print_infos.append(LabelInfo(\n ID=ID,\n Category=info.Category,\n Pos_b=new_beg,\n Pos_e=new_end,\n Privacy=raw_content[new_beg: new_end+1]\n ))\n continue\n pattern = content \\\n .replace('\\\\', '\\\\\\\\') \\\n .replace('(', '\\\\(') \\\n .replace(')', '\\\\)') \\\n .replace('?', '\\\\?') \\\n .replace('.', '\\\\.') \\\n .replace('*', '\\\\*') \\\n .replace('+', '\\\\+') \\\n .replace(\"[UNK]\", \".+?\") \\\n .replace('[', '\\\\[') \\\n .replace(']', '\\\\]') \\\n .replace('{', '\\\\{') \\\n .replace('}', '\\\\}')\n m = re.search(pattern, current_content)\n if m is not None:\n new_beg = head + m.start()\n new_content = m.group()\n new_end = new_beg + len(new_content) - 1\n head = new_end + 1\n to_print_infos.append(LabelInfo(\n ID=ID,\n Category=info.Category,\n Pos_b=new_beg,\n Pos_e=new_end,\n Privacy=raw_content[new_beg: new_end+1]\n ))\n continue\n not_in_raw_count += 1\n if not_in_raw_count == 1:\n self.logger.log_message(\n signature, \"[{:d}] found mismatch(es):\".format(ID))\n 
self.logger.log_message(signature, \"pattern=\", pattern)\n self.logger.log_message(signature, '\\t', reader.dumps(info))\n\n for info in to_print_infos:\n csv_ofs.write(reader.dumps(info) + '\\n')\n email_count = 0\n mobile_QQ_count = 0\n if detect_email:\n for info in self.detect_email(ID, raw_content):\n email_count += 1\n csv_ofs.write(reader.dumps(info) + '\\n')\n for info in self.detect_mobile_and_QQ(ID, raw_content):\n mobile_QQ_count += 1\n csv_ofs.write(reader.dumps(info) + '\\n')\n\n if not_in_raw_count != 0:\n self.logger.log_message(\n signature, \"[{:d}] detect {:d} info not in raw\".format(ID, not_in_raw_count))\n\n return not_in_raw_count, email_count, mobile_QQ_count\n\n def trans_origin_to_raw(self, data_dir: str = None, detect_email=True):\n signature = \"trans_origin_to_raw()\\t\"\n\n input_csv = open(os.path.join(DefaultConfig.PATHS.DATA_INFO,\n \"predict_origin.csv\"), 'r', encoding='utf8')\n output_csv = open(os.path.join(\n DefaultConfig.PATHS.DATA_INFO, \"predict_raw.csv\"), 'w', encoding='utf8')\n\n reader = LabelFileReader()\n\n data_dir = data_dir if data_dir is not None else os.path.join(\n DefaultConfig.PATHS.DATA_CCF_RAW, \"test\")\n\n not_in_raw_count = 0\n email_count = 0\n mobile_QQ_count = 0\n current_id = 0\n infos = []\n for line in input_csv.readlines():\n info = reader.loads(line)\n if (info.ID == current_id):\n infos.append(info)\n continue\n not_in_raw_count_delta, email_count_delta, mobile_QQ_count_delta = \\\n self._print_infos_to_csv_for_id(\n ID=current_id,\n infos=infos,\n csv_ofs=output_csv,\n data_dir=data_dir,\n detect_email=detect_email)\n not_in_raw_count += not_in_raw_count_delta\n email_count += email_count_delta\n mobile_QQ_count += mobile_QQ_count_delta\n\n current_id = info.ID\n infos = [info]\n\n not_in_raw_count_delta, email_count_delta, mobile_QQ_count_delta = \\\n self._print_infos_to_csv_for_id(\n ID=current_id,\n infos=infos,\n csv_ofs=output_csv,\n data_dir=data_dir,\n detect_email=detect_email)\n 
not_in_raw_count += not_in_raw_count_delta\n email_count += email_count_delta\n mobile_QQ_count += mobile_QQ_count_delta\n\n self.logger.log_message(\n signature, \"not in raw count=\", not_in_raw_count)\n self.logger.log_message(signature, \"reg email count=\", email_count)\n self.logger.log_message(signature, \"reg mobile&QQ count=\", mobile_QQ_count)\n self.logger.log_message(signature, \"finish\")\n input_csv.close()\n output_csv.close()\n\n def final_format(self):\n signature = \"final_format()\\t\"\n self.logger.log_message(signature, \"start\")\n with open(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"predict_raw.csv\"), 'r', encoding='utf8') as f:\n lines = f.read().splitlines()\n self.logger.log_message(\n signature, \"total entity count=\", len(lines))\n all_content = list(set(lines))\n self.logger.log_message(\n signature, \"unique entity count=\", len(all_content))\n\n def get_beg(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return int(beg)\n\n def get_end(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return int(end)\n\n def get_id(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return int(ID)\n\n def is_single(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return int(beg) == int(end)\n\n def is_mobile(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return clz == \"mobile\"\n\n def is_QQ(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return clz == \"QQ\"\n\n def get_content(line: str) -> int:\n ID, clz, beg, end, content = tuple(line.split(','))\n return content\n all_content.sort(key=get_end)\n all_content.sort(key=get_beg)\n all_content.sort(key=get_id)\n self.logger.log_message(signature, \"len=0:\")\n self.logger.log_message(signature, \"length-1 entity count=\",\n sum(1 for line in all_content if is_single(line)))\n # for line in all_content:\n # if (is_mobile(line)):\n # 
self.logger.log_message(\n # signature, \"[mobiles]\\t\", get_content(line))\n # for line in all_content:\n # if (is_QQ(line)):\n # self.logger.log_message(signature, \"[QQ]\\t\", get_content(line))\n with open(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"predict.csv\"), 'w', encoding='utf8') as f:\n f.write('ID,Category,Pos_b,Pos_e,Privacy\\n')\n for content in all_content:\n if not is_single(content):\n f.write(content + \"\\n\")\n\n\nif __name__ == \"__main__\":\n # formatter = ReResultFormatter(os.path.join(DefaultConfig.PATHS.DATA_INFO, \"split_index_train.json\"))\n # formatter.combine_all(\n # origin_data_count=2515,\n # label_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train/label\"),\n # data_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_CLEANED, \"train/data\"))\n # formatter.trans_origin_to_raw(data_dir=os.path.join(DefaultConfig.PATHS.DATA_CCF_RAW, \"train/data\"))\n formatter = ReResultFormatter()\n formatter.combine_all(receive_rate = 0.7, result_dir=os.path.join(\n DefaultConfig.PATHS.DATA_CCF_CLEANED, \"test/label/80/30\"))\n formatter.trans_origin_to_raw()\n formatter.final_format()\n" }, { "alpha_fraction": 0.5528523325920105, "alphanum_fraction": 0.568791925907135, "avg_line_length": 35.06060791015625, "blob_id": "670c2922b8a86689376d8884f365123f2bd30ab5", "content_id": "435a1ac2ab16c4c3e746cc119a1b23db3db9b2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/core/model/bert_demo.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "# _*_coding:utf-8_*_\n# Author : JacquesdeH\n# Create Time : 2020/11/18 20:21\n# Project Name: NER-CCF2020-HeapOverflow\n# File : bert_demo.py\n# --------------------------------------------------\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModel\n\nif __name__ == '__main__':\n _pretrained_model = 
'bert-base-chinese'\n _special_tokens = {\"unk_token\": \"[UNK]\",\n \"sep_token\": \"[SEP]\",\n \"pad_token\": \"[PAD]\",\n \"cls_token\": \"[CLS]\",\n \"mask_token\": \"[MASK]\"\n }\n\n tokenizer = AutoTokenizer.from_pretrained(_pretrained_model)\n # cnt_added_tokens = tokenizer.add_special_tokens(_special_tokens)\n model = AutoModel.from_pretrained(_pretrained_model)\n # model.resize_token_embeddings(len(tokenizer))\n\n # input_ = \"[CLS]ๆˆ‘ๆ“ไฝ ๆ˜ฏๅ‚ป้€ผๅ—,ๆœ‹ๅ‹?[SEP]\"\n input_ = \"ๆˆ‘ๆ“ไฝ ๆ˜ฏๅ‚ป้€ผๅ—?ๆœ‹ๅ‹?\"\n tokens = tokenizer.tokenize(input_)\n # indexs = tokenizer.convert_tokens_to_ids(tokens)\n indexs = tokenizer.encode(tokens)\n tmp = tokenizer.convert_ids_to_tokens(indexs)\n indexs = torch.tensor(indexs, dtype=torch.long).reshape(1, -1)\n\n output = model.forward(indexs)\n\n\n" }, { "alpha_fraction": 0.828125, "alphanum_fraction": 0.828125, "avg_line_length": 31.5, "blob_id": "1363b37e629a82dba5f658b7d3c0404c7cb4abda", "content_id": "279b3fb5fa85e427075bd9a5a0d2e34cfdd0450c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/core/utils/__init__.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from .logger import alloc_logger\nfrom .logger import log_message" }, { "alpha_fraction": 0.5415040254592896, "alphanum_fraction": 0.5464481115341187, "avg_line_length": 34.748836517333984, "blob_id": "6cf72b12e733f052cd13df67c4a8bb9c52e5054c", "content_id": "d5c0c7fe10180bbe0c0bda397dad4c23d7e4c8e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7686, "license_type": "no_license", "max_line_length": 128, "num_lines": 215, "path": "/core/dataloader/dataloader.py", "repo_name": "JacquesdeH/NER-CCF2020-HeapOverflow", "src_encoding": "UTF-8", "text": "from ..config.DefaultConfig import DefaultConfig\nimport torch\nimport 
torch.utils.data as tud\nimport os\nimport json\nimport random\nfrom ..utils import alloc_logger\nfrom Main import args as tempArgs\n\nclass Iterator:\n def __init__(self, target, indices=[]):\n self.target = target\n self.index = -1\n self.indices = indices\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n self.index += 1\n if len(self.indices) > 0:\n return self.target[self.indices[self.index]]\n else:\n return self.target[self.index]\n except IndexError:\n raise StopIteration\n\n\nclass CCFDataset(tud.Dataset):\n def __init__(self, args, in_train=True):\n super().__init__()\n self.in_train = in_train\n self.args = args\n self.seq_len = args.seq_len\n self.label_dim = args.label_dim\n\n self.data_path = DefaultConfig.PATHS.DATA_CCF_CLEANED\n if self.in_train:\n self.data = self.data_path + \"/train/data\"\n self.label = self.data_path + \"/train/label\"\n else:\n self.data = self.data_path + \"/test/data\"\n self.label = \"\"\n\n self.data_file_list = os.listdir(self.data)\n if self.label != \"\":\n self.label_file_list = os.listdir(self.label)\n else:\n self.label_file_list = list()\n\n self.file_num = len(self.data_file_list)\n\n def __len__(self):\n return self.file_num\n\n '''\n data_content: str\n label_content: torch.Tensor with dtype = torch.long\n '''\n # FIXME: force length to SEQ_LEN now use config,\n def __getitem__(self, idx):\n if idx >= len(self):\n raise IndexError\n with open(self.data + '/' + self.data_file_list[idx], encoding=\"UTF-8\") as f_data:\n data_content = f_data.read()\n if self.in_train:\n with open(self.label + '/' + self.label_file_list[idx], encoding=\"utf8\") as f_label:\n label_list = json.load(f_label)\n # label_list = [[(1 if count == sequence else 0) for count in range(self.label_dim)] for sequence in label_list]\n if len(label_list) >= self.seq_len:\n label_list = label_list[:self.seq_len]\n else:\n label_list = [0] + label_list + [0 for count in range(self.seq_len - len(label_list) - 1)]\n # label_list += 
[0 for count in range(self.seq_len - len(label_list))]\n # label_list += [[0] * self.args.label_dim for count in range(self.seq_len - len(label_list))]\n label_content = torch.LongTensor(label_list)\n return data_content, label_content.to(self.args.device)\n else:\n return data_content\n\n\nclass CCFDataloader:\n def __init__(self, args, in_train=True, thread_num=1):\n self.args = args\n self.logger = alloc_logger(\"CCFDataloader.log\", CCFDataloader)\n self.in_train = in_train\n self.dataset = CCFDataset(args=args, in_train=self.in_train)\n self.batch_size = args.batch_size\n self.thread_num = thread_num\n self.file_num = len(self.dataset)\n self.dataset_index = list(range(self.file_num))\n self.logger.log_message(\"file num:\\t\", self.file_num)\n\n def __len__(self):\n return self.file_num // self.batch_size + (0 if self.file_num % self.batch_size == 0 else 1)\n\n def __iter__(self):\n return Iterator(self)\n\n def __getitem__(self, idx):\n if idx >= len(self):\n raise IndexError\n ret_size = self.batch_size\n if idx == len(self) - 1:\n ret_size = self.file_num % self.batch_size\n if self.in_train:\n data_contents = list()\n label_contents = 0\n for count in range(ret_size):\n data_content, label_content = self.dataset[self.dataset_index[idx * self.batch_size + count]]\n data_contents.append(data_content)\n label_content.unsqueeze_(0)\n if isinstance(label_contents, torch.Tensor):\n label_contents = torch.cat((label_contents, label_content))\n else:\n label_contents = label_content\n return data_contents, label_contents\n else:\n data_contents = list()\n for count in range(ret_size):\n data_content = self.dataset[self.dataset_index[idx * self.batch_size + count]]\n data_contents.append(data_content)\n return data_contents\n\n def shuffle(self):\n random.shuffle(self.dataset_index)\n\n\nclass KFold:\n def __init__(self, dataloader, k=10):\n self.k = k\n self.dataloader = dataloader\n self.dataloader_index = list(range(len(self.dataloader)))\n self.folds = 
list()\n fold_length = len(self.dataloader) // self.k + 1\n pre_index = - fold_length\n count = -1\n for index in self.dataloader_index:\n if index - pre_index == fold_length:\n count += 1\n pre_index = index\n if count == len(self.dataloader) % self.k:\n fold_length -= 1\n self.folds.append(list())\n self.folds[len(self.folds)-1].append(index)\n self.fold_count = 0\n self.fold_train = [index for index in self.dataloader_index if index not in self.folds[self.fold_count]]\n self.fold_valid = self.folds[self.fold_count]\n\n def __len__(self):\n return self.k\n\n def next_fold(self):\n self.fold_count += 1\n self.fold_count %= self.k\n self.fold_train = [index for index in self.dataloader_index if index not in self.folds[self.fold_count]]\n self.fold_valid = self.folds[self.fold_count]\n\n def get_train(self):\n return Iterator(self.dataloader, self.fold_train)\n\n def get_train_len(self):\n return len(self.fold_train)\n\n def get_valid(self):\n return Iterator(self.dataloader, self.fold_valid)\n\n def new_k_fold(self):\n self.dataloader.shuffle()\n self.fold_count = 0\n\n\nif __name__ == \"__main__\":\n '''\n ccf_dataloader = CCFDataloader(args=Tempargs, in_train=True)\n for i, (data_contents, label_contents) in enumerate(ccf_dataloader):\n if i == 5:\n break\n print(\"=============BATCH %d=============\" % i)\n print(data_contents)\n print(label_contents)\n '''\n ccf_dataloader = CCFDataloader(args=tempArgs, in_train=True)\n print(len(ccf_dataloader))\n k_fold = KFold(ccf_dataloader, 10)\n for fold_count in range(len(k_fold)):\n print(\"=============NEW FOLD============\")\n count = 0\n print('--------TRAIN--------')\n for data_content, label_content in k_fold.get_train():\n '''\n print('---train_%d---' % count)\n print(len(data_content))\n print(label_content.shape)\n '''\n if count == 0:\n print('---train_%d---' % count)\n print(len(data_content))\n print(len(label_content), label_content.device)\n count += 1\n count = 0\n print('--------VALID--------')\n for 
data_content, label_content in k_fold.get_valid():\n '''\n print('---valid_%d---' % count)\n print(len(data_content))\n print(label_content.shape)\n '''\n if count == 0:\n print('---valid_%d---' % count)\n print(len(data_content))\n print(len(label_content), label_content.device)\n count += 1\n k_fold.next_fold()\n" } ]
19
phuyals2/ECE2524_P3
https://github.com/phuyals2/ECE2524_P3
b5ee665017920fa4e10da51139c834cb189c35de
fbff1d699ebff1321316235894e6b38037b2861e
383501eca7f7de25787107288e59d46d5bf7cdc7
refs/heads/main
2023-01-24T15:29:51.564578
2020-12-04T21:54:04
2020-12-04T21:54:04
312,912,853
0
1
null
2020-11-14T22:25:40
2020-11-14T22:25:45
2020-11-26T10:13:51
null
[ { "alpha_fraction": 0.6731366515159607, "alphanum_fraction": 0.6847826242446899, "avg_line_length": 48.53845977783203, "blob_id": "3be707baaff54272b693d674ecc0d064c3085e2f", "content_id": "49d1e854c3c543eaad5e2cc44ee27f1fa77a30f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 126, "num_lines": 26, "path": "/README.md", "repo_name": "phuyals2/ECE2524_P3", "src_encoding": "UTF-8", "text": "## ECE2524_P3\n\nProject for ECE 2524\n\n## I have made a small change from the original proposal. \n\nThe program will no longer be invoked with additional command-line arguments as mentioned before. \nInstead, I have implemented a menu based system in order to make it easier for the user to interface with the program.\nI felt the original implementation could be difficult to understand for a novice user, so I changed it to be more interactive.\n\n## Description\n\nA menu-based python implementation to handle simple file management tasks.\nTasks handled are: \n\n 1] Generating a directory tree from user specified path\n 2] Arranging files in a directory by their size and displaying it in the console\n 3] Arranging files by the time they were created and printing them out in the console\n 4] Renaming files in a folder based on their extension.\n - The program will prompt the user to specify the extension type and a new name\n - Files will be renamed with the provided newname appended with numbers starting from 0. 
\n 5] Deleting files in a folder based on their extension.\n \n## Usage \n\nSimply download filemanager.py and run it.\n" }, { "alpha_fraction": 0.48231202363967896, "alphanum_fraction": 0.4871087670326233, "avg_line_length": 41.207252502441406, "blob_id": "3b1626df5b92f030c6c0fcb81f14dcab30dbd35a", "content_id": "c442d916b111de3e684953e03dbc3cae1acc8517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8339, "license_type": "no_license", "max_line_length": 144, "num_lines": 193, "path": "/filemanager.py", "repo_name": "phuyals2/ECE2524_P3", "src_encoding": "UTF-8", "text": "import sys\r\nimport os\r\nimport time\r\nfrom stat import S_ISREG, ST_CTIME, ST_MODE\r\n\r\n# This function prompts for a valid directory path.\r\n# If the entered path is invalid, it will keep prompting the user to input a valid path\r\ndef verify_path():\r\n my_path = input('Enter a directory path:')\r\n if os.path.isdir(my_path):\r\n return my_path\r\n else:\r\n print(\"Please enter a valid path!\\n \")\r\n dir_path = verify_path()\r\n return dir_path\r\n\r\n\r\n# This function is called when option 1 is selected from the menu.\r\n# It uses the os module to recursively traverse the user provided directory. \r\ndef traverse_directory(my_path):\r\n print(\"\\n\") \r\n for root, dirs, files in os.walk(my_path):\r\n level = root.replace(my_path, '').count(os.sep)\r\n shift = ' ' * 4 * level\r\n print(' {}{}: '.format(shift, \"Inside --> \" + os.path.basename(root)))\r\n bulleted = ' ' * 6 * (level + 2)\r\n for i in files:\r\n print(' {}{}'.format(bulleted, \"- \" + i))\r\n print(\"----------------------------------------------------------\\n\") \r\n \r\n\r\n # This is the function called to rename files. 
Parameters passed are the directory path, new name, and file extension.\r\n # This function will check if a file of given extension exists in a directory or not.\r\n # - If it exists, it will rename the file with the name passed by the user.\r\n # - It will also append numbers at the end of the new name starting from 0 ( This will depend on file's postion inside the for loop).\r\n # - If it does not exist, it will simply return false \r\ndef rename_files(path, new_name, file_type):\r\n check = False\r\n verify = False\r\n temp_files = os.listdir(path) # lists the files in the specified path\r\n files = sorted(temp_files)\r\n\r\n for index, file in enumerate(files):\r\n if file.endswith(file_type): # Checking if the filetype exists in the directory\r\n os.rename(os.path.join(path, file), os.path.join(path, ''.join([new_name, str(index), \".\" + file_type])))\r\n check = True\r\n verify = True\r\n else:\r\n check = False\r\n return verify \r\n\r\n# This is the function called when the user wants to delete a file of certain extension from a folder. \r\n# The parameters aree directory path and file extenstion.\r\n # - If a file of certain extension exists, it will simply delete that file.\r\n # - If it doesn't exist, it will return a boolean value of false. 
\r\ndef delete_files(path, file_type):\r\n check = False\r\n verify = False\r\n test = os.listdir(path)\r\n\r\n for item in test:\r\n if item.endswith(file_type):\r\n os.remove(os.path.join(path, item))\r\n check = True\r\n verify = True\r\n else:\r\n check = False\r\n\r\n return verify\r\n\r\n# This function will arrange the files a given directory based on the file size and print them out in the console.\r\n# It will arrange files in ascending order.\r\ndef arrange_by_size(path):\r\n os.chdir(path)\r\n # creating a dictionary to hold files\r\n my_dictionary = {}\r\n print(\"Arranging by size: \\n\")\r\n for files in os.listdir(path):\r\n if os.path.isfile(files):\r\n my_dictionary[files] = os.stat(files).st_size # this gives size in bytes\r\n\r\n print(f'\\tFiles {6 * \" \"}\\t File Size \\n ')\r\n\r\n for file, size in sorted(my_dictionary.items(), key=lambda s: (s[1], s[0])): # This will sort the files in required order\r\n print(f\"{file:<30} {size / 1000:.03f} KB\") # Formatting the output\r\n\r\n print(\"------------------------------------------------------------------------\\n\") \r\n \r\n \r\n # This function will arrange the files in the given directory by the date/time they were created and display them in the console \r\ndef arrange_by_time(path):\r\n my_list = (os.path.join(path, f) for f in os.listdir(path)) # listing the files in the specified path\r\n my_list = ((os.stat(path), path) for path in my_list) # listing the files with stat\r\n my_list = ((stat[ST_CTIME], path) for stat, path in my_list if S_ISREG(stat[ST_MODE]))\r\n\r\n print(f'\\tFiles {6 * \" \"}\\t\\t Date/Time created \\n ')\r\n for my_time, path in sorted(my_list):\r\n print(f\"{os.path.basename(path):<40}{time.ctime(my_time)} \")\r\n print(\"-----------------------------------------------------------------------\\n\")\r\n \r\n # This function simply prints the welcome screen menu \r\ndef print_menu():\r\n print(\"\\n\")\r\n print(\"\\t***************************\")\r\n print(\"\\t 
!!-- Welcome -- !! \")\r\n print(\"\\t***************************\")\r\n print(\"=================================================\\n\"\r\n \"\\t\\tAvailable options: \\n\"\r\n \"=================================================\\n\"\r\n \"\\t1] - Generate directory tree \\n\"\r\n \"\\t2] - List files by size\\n\"\r\n \"\\t3] - List files by creation date/time\\n\"\r\n \"\\t4] - Rename files \\n\"\r\n \"\\t5] - Delete files\\n\"\r\n \"\\t6] - Exit\\n\"\r\n \"=================================================\\n\")\r\n\r\n# This is the main function which will call different functions above based on the task selected by the user.\r\ndef main():\r\n print_menu()\r\n while True:\r\n print(\"[press 0 to view menu] \")\r\n user_input = input(\"Enter an option number from the menu: \")\r\n\r\n if user_input == \"0\":\r\n print(\r\n \"##################################################\\n\"\r\n \"\\t1] - Generate directory tree\\n\"\r\n \"\\t2] - List files by size\\n\"\r\n \"\\t3] - List files by date/time\\n\"\r\n \"\\t4] - Rename files based on extension\\n\"\r\n \"\\t5] - Delete files based on extension\\n\"\r\n \"\\t6] - Exit\\n\"\r\n \"##################################################\\n\")\r\n\r\n elif user_input == \"1\":\r\n print(\" \\n\")\r\n print(\"******-- Walking a directory --*******\")\r\n my_path = verify_path()\r\n traverse_directory(my_path)\r\n print(\"\\n\")\r\n\r\n elif user_input == \"2\":\r\n print(\" \\n\")\r\n print(\"******-- Arranging files by size --*****\")\r\n my_path = verify_path()\r\n arrange_by_size(my_path)\r\n\r\n elif user_input == \"3\":\r\n print(\" \\n\")\r\n print(\"******-- Arranging files by date/time --*****\")\r\n my_path = verify_path()\r\n arrange_by_time(my_path)\r\n\r\n elif user_input == \"4\":\r\n print(\"******-- Rename files --******\") \r\n my_path = verify_path()\r\n file_type = input(\"Enter the file(s) extension: \")\r\n new_name = input(\"Enter the new name for files: \")\r\n check = rename_files(my_path, 
new_name, file_type)\r\n\r\n if check is True:\r\n print(\" All the files of type \" + file_type + \" have been successfully renamed!!\\n\")\r\n print(\"----------------------------------------------------------\\n\")\r\n elif check is False:\r\n print(\" !!The specified file type does not exist in the provided directory!! \")\r\n print(\"----------------------------------------------------------\\n\")\r\n \r\n \r\n elif user_input == \"5\":\r\n print(\"******-- Delete files --******\")\r\n my_path = verify_path()\r\n file_type = input(\"Enter the file(s) extension: \")\r\n check = delete_files(my_path, file_type)\r\n if check is True:\r\n print(\" All the files of type \" + file_type + \" have been successfully deleted!!\\n\")\r\n print(\"----------------------------------------------------------\\n\")\r\n elif check is False:\r\n print(\" !!The specified file type does not exist in the provided directory!! \")\r\n print(\"----------------------------------------------------------\\n\")\r\n\r\n \r\n elif user_input == \"6\":\r\n print(\"Exiting...Thank You!!\")\r\n sys.exit(1)\r\n\r\n else:\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n print(\"!! Please select a valid option. !!\")\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n\r\n\r\nmain()\r\n" } ]
2
upandacross/warehouse_optimization
https://github.com/upandacross/warehouse_optimization
98ce101ed845fca7a044137fc11f9754112a8a58
c1bae87bcb3371a7073d75e147322b4aee459f6a
144b09ebc05ab1f157c680a5237055b57861af82
refs/heads/master
2020-06-01T03:56:41.364403
2019-06-22T02:56:10
2019-06-22T02:56:10
190,624,578
6
0
null
null
null
null
null
[ { "alpha_fraction": 0.6058648228645325, "alphanum_fraction": 0.6071073412895203, "avg_line_length": 37.32381057739258, "blob_id": "0240e5bee41ad6c47695c4c80efebd23471e954e", "content_id": "b0ea9530dcd4671ea7463d475970181862734aa6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4024, "license_type": "permissive", "max_line_length": 132, "num_lines": 105, "path": "/Warehouse/Inventory.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nfrom Warehouse.Bin import Bin\n \nclass Inventory:\n '''\nA singleton class, Inventory serves much like a factory using closure to return instances that\nrefer to a single hidden class within of which there is a single instance. References to the\nhidden attributes are accomplished by a custom __getattr__ method. Updates to the\nhidden attributes are accomplished by a custom __setattr__ method.\n\nThere are no arguments to __init__.\n\nAssumptions:\n each bin location contains only one item and, therefore each location occurs once in the inventory\n'''\n\n __stock = defaultdict(set) # Inventory.__stock[item] = locations that can be bassed to Bin.get_bin_by_location(location)\n\n def __init__(self):\n pass\n \n \n @classmethod\n def clear(cls):\n cls.__stock = defaultdict(set) # cls.__stock[item] = location\n \n \n @classmethod\n def update_bin(cls, location, item_no, qty):\n '''\n add or subtract qty in bin at location\n if item being updated != item in bin at location, bin.__item, bin.__count = item_no, qty\n \n Assumption:\n if called to update bin with new item, then all inbins with old item will be replaced everywhere. 
\n Use Bin.__stock_bin to update individual bins but be sure to only update bin instances that live in Inventory\n'''\n assert isinstance(location, Bin.Bin_Location), 'location SBE instance of Bin.Bin_Location, is {}'.format(location.__class__)\n assert isinstance(qty, int), 'qty mst be int'\n assert isinstance(item_no, int) and item_no > 0, 'item_no must be int > 0'\n \n b = Bin.get_bin_by_location(location)\n if b is None:\n b = Bin(rack_no=location.rack, side=location.side, bin_no=location.bin_no)\n if b.item != item_no:\n try:\n cls.__stock[item_no].discard(location)\n except:\n pass\n b.stock_bin(item_no, qty)\n cls.__stock[item_no].add(b.location)\n pass\n @classmethod\n def get_location_bin(cls, location):\n assert isinstance(location, Bin.Bin_Location), 'location must be an Bin.Bin_Location, is {}'.format(location)\n b = Bin.get_bin_by_location(location)\n return b\n\n\n @classmethod\n def get_stock_qty(cls, item_no=None, location=None):\n '''\n First, if location is not None return bin.count @ location, otherwise check item_no is not None\n If item_no is not None, return quantity of item_no at all location.\n If both item_no and location are None or both are Not None, error\n'''\n assert item_no is not None or location is not None, 'either item_no or location are not None'\n assert item_no is None or location is None, \\\n 'either item_no or location are not None, NOT both'\n if location is not None:\n b = Bin.get_bin_by_location(location)\n return (b.item, b.count) # it is caller's responsibility to check b.item == item_no\n \n elif item_no is None or item_no not in cls.__stock: # item_no is not None by assertion above\n return (item_no, 0)\n else:\n return (item_no, sum([Bin.get_bin_by_location(loc).count for loc in cls.__stock[item_no]]))\n\n\n def __repr__(self):\n if len(self.stock.values()) > 0:\n qty = sum(\n [sum(Bin.get_bin_by_location(loc).count for loc in Inventory.__stock[itm]\n )\n for itm in Inventory.__stock.keys()\n ]\n )\n else:\n qty = 
0\n return 'Inventory: {:,d} items, {:,d} total quantity'\\\n .format(len(self.stock), qty)\n\n\n def __str__(self):\n return Inventory.__repr__(self)\n\n\n @property\n def stock(self):\n return type(self).__stock\n\n @stock.setter\n def stock(self, args):\n raise RuntimeError('stock is maintained in inventory via Bin stocking')\n" }, { "alpha_fraction": 0.6171355843544006, "alphanum_fraction": 0.6319766640663147, "avg_line_length": 28.290666580200195, "blob_id": "22a61ce0b955903ed5095329a8456a2c7c820073", "content_id": "8177f471c99425956a77fcb7057886b9f2b64e7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10983, "license_type": "permissive", "max_line_length": 123, "num_lines": 375, "path": "/runWhOpt.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "#!/home/bren/miniconda3/bin/python3.7\n# encoding: utf-8\n'''\nrunWhOpt \n\n\trun genetic algorithm to find inventory stocking that minimizes distance traveled to pick orders\n\t\n\texamples:\n\t\n\t\trunWhOPt 5 5 # run simulation of warehouse with 5 racks of 5 bins on each side\n\n@author: user_name\n\n@copyright: 2019 TLA. 
All rights reserved.\n\n@license: MIT\n\n@contact: user_email\n@deffield updated: Updated\n'''\n\nfrom collections.abc import Iterable\nimport heapq\nfrom itertools import product\nimport multiprocessing\nimport numpy as np\nfrom numpy import array as nparray, roll\nfrom numpy.random import choice, randint, random, seed\nimport os\nimport pickle\nimport sys\nfrom time import ctime\nfrom Warehouse.Individual import Individual\nfrom Warehouse.Order import Order\nfrom Warehouse.PickRoute import PickRoute\nfrom Warehouse.Warehouse import Warehouse\n\n\nfrom argparse import ArgumentParser\nfrom argparse import RawDescriptionHelpFormatter\n\n\n__all__ = []\n__version__ = 0.1\n__date__ = '2019-06-17'\n__updated__ = '2019-06-17'\n\nDEBUG = 0\nTESTRUN = 0 # will only evaluate fitness of limited number of pop, cross-over, mutate\nRESULTS = 1 # print stats for each gen\nPROFILE = 0\n\nclass CLIError(Exception):\n\t'''Generic exception to raise and log different fatal errors.'''\n\tdef __init__(self, msg):\n\t\tsuper(CLIError).__init__(type(self))\n\t\tself.msg = \"E: %s\" % msg\n\tdef __str__(self):\n\t\treturn self.msg\n\tdef __unicode__(self):\n\t\treturn self.msg\n\ndef main(argv=None): # IGNORE:C0111\n\t'''Command line options.'''\n\n\tif argv is None:\n\t\targv = sys.argv\n\telse:\n\t\tsys.argv.extend(argv)\n\n\tprogram_name = os.path.basename(sys.argv[0])\n\tprogram_version = \"v%s\" % __version__\n\tprogram_build_date = str(__updated__)\n\tprogram_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)\n\tprogram_shortdesc = __import__('__main__').__doc__.split(\"\\n\")[1]\n\tprogram_license = '''%s\n\n\tCreated by user_name on %s.\n\tCopyright 2019 organization_name. 
All rights reserved.\n\n\tLicensed under the Apache License 2.0\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\n\tDistributed on an \"AS IS\" basis without warranties\n\tor conditions of any kind, either express or implied.\n\nUSAGE\n''' % (program_shortdesc, str(__date__))\n\n\ttry:\n\t\t# Setup argument parser\n\t\tparser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)\n\t\tparser.add_argument(\"racks\", type=int,\n\t\t\t\t\t\thelp=\"intiger number of warehouse racks\")\n\t\tparser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"count\",\n\t\t\t\t\t\thelp=\"set verbosity level [default: %(default)s]\")\n\t\tparser.add_argument(\"bins\", type=int,\n\t\t\t\t\t\thelp=\"integer number of bins on one side of rack.\")\n\t\tparser.add_argument('-V', '--version', action='version', version=program_version_message)\n\t\tparser.add_argument('-g', '--generations', action='store', dest='gens', type=int,\n\t\t\t\t\t\thelp='integer number of generations of evolution. 
Default 1',\n\t\t\t\t\t\tdefault=1)\n\t\tparser.add_argument('-r', '--restore', action='store', dest='restore', type=str,\n\t\t\t\t\t\thelp='Restore prior version of population from file',\n\t\t\t\t\t\tdefault='')\n\n\t\t# Process arguments\n\t\targs = parser.parse_args()\n\n\texcept KeyboardInterrupt:\n\t\t### handle keyboard interrupt ###\n\t\treturn 0\n\texcept Exception as e:\n\t\tif DEBUG or TESTRUN:\n\t\t\traise(e)\n\t\tindent = len(program_name) * \" \"\n\t\tsys.stderr.write(program_name + \": \" + repr(e) + \"\\n\")\n\t\tsys.stderr.write(indent + \" for help use --help\")\n\t\treturn 2\n\t\n\t\n\t########################################################################\n\t# this will be parallelized - TBD: create class MyGlobals and pickle it for \n\t# multiprocesses\n\t\n\tclass MyEnv:\n\t\tdef __init__(self, args, CXPB=0.7):\n\t\t\trestore = args.restore\n\t\t\tif len(restore) > 0:\n\t\t\t\tif not os.path.isfile(restore):\n\t\t\t\t\tprint('restore file {} does not exist'.format(restore))\n\t\t\t\t\tsys.exit(2)\n\t\t\t\telse:\n\t\t\t\t\tself.RESTORE=True\n\t\t\telse:\n\t\t\t\t\tself.RESTORE=False\n\t\n\t\t\tself.verbose = args.verbose\n\t\t\tracks = args.racks\n\t\t\tbins = args.bins\n\t\t\t\n\t\t\tself.NGENS = args.gens\n\n\t\t\tif self.verbose:\n\t\t\t\tprint(\"Verbose mode on\")\n\t\n\t\t\tif racks > 0:\n\t\t\t\tself.NUM_RACKS = racks\n\t\t\t\tif self.verbose:\n\t\t\t\t\tprint(\"{} racks\".format(racks))\n\t\t\telse:\n\t\t\t\traise RuntimeError('must be int > 0 number of racks')\n\t\n\t\t\tif bins > 0:\n\t\t\t\tself.NUM_RACK_SIDE_BINS = bins\n\t\t\t\tif self.verbose:\n\t\t\t\t\tprint(\"{} bins\".format(bins))\n\t\t\telse:\n\t\t\t\traise RuntimeError('must be int > 0 number of bins on each side of rack')\n\t\t\t\n\t\t\tself.NUM_BINS = self.NUM_RACKS * self.NUM_RACK_SIDE_BINS * 2 # two sides to a rack\n\t\t\tself.INDXS = np.arange(self.NUM_BINS)\n\t\t\tself.NUM_ORDERS = self.NUM_BINS * 10\n\t\t\tself.ORDER_LINES = 
15\n\t\n\t\t\tself.CXPB=CXPB\n\t\t\tself.MUTPB=1.0 - CXPB\n\n\t\t\tself.wh = Warehouse(self.NUM_RACKS, self.NUM_RACK_SIDE_BINS)\n\t\n\t\t\tself.rsb = [(r, s, b) for r, s, b in product(range(self.NUM_RACKS), list('ab'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t range(1, self.NUM_RACK_SIDE_BINS + 1, 1))]\n\t\t\tself.idxs = np.arange(self.NUM_BINS)\n\n\t\t\tself.orders = []\t\n\t\t\tseed(42)\n\t\t\tfor _ in range(self.NUM_ORDERS):\n\t\t\t\to = Order()\n\t\t\t\to_items = choice(range(1, self.NUM_BINS + 1), size=self.ORDER_LINES, replace=False)\n\t\t\t\tfor itm in o_items:\n\t\t\t\t\to.add_line(item_no=int(itm), qty=10)\n\t\t\t\tself.orders.append(o)\n\t\n\t\t\tif self.RESTORE:\n\t\t\t\twith open(restore, 'rb') as infile:\n\t\t\t\t\tself.pop = pickle.load(infile)\n\t\t\t\tplen = len(self.pop[0])\n\t\t\t\t# reset pop_id and fitness = 0\n\t\t\t\tself.pop = [Individual(shape=(plen,), buffer=p) for p in self.pop]\n\t\t\t\tpass # for debugging\t\n\t\t\telse:\n\t\t\t\tself.pop = list()\n\t\t\t\tfor _ in range(1, self.NUM_BINS + 1):\n\t\t\t\t\t# individulas contain a shuffled list of item_no\n\t\t\t\t\t# bins contain a fixed qty of an item_no selected from a shuffled list\n\t\t\t\t\t# the fitness attribute will be set to number of steps required to fulfill orders from items when so distributed in wh\n\t\t\t\t\tself.pop.append(Individual(shape=(self.NUM_BINS,)))\n\t\n\n\tmyenv = MyEnv(args)\n\t\n\tif myenv.verbose or RESULTS: \n\t\tprint('simulate warehouse with {} racks and {} bins on each side'.format(myenv.NUM_RACKS, myenv.NUM_BINS))\n\t\n\t##########################################################\n\t# support functions\n\t\n\tdef evalFitness(individual):\n\t\tif individual.fitness != Individual.default_fitness():\n\t\t\treturn individual.fitness\n\t\t\n\t\tmyenv.wh.clear()\n\t\n\t\tfor i, (r, s, bn) in zip(individual, myenv.rsb):\n\t\t\tif s == 'a':\n\t\t\t\tb = myenv.wh.racks[r].bins_a[bn]\n\t\t\telse: # s == 'b'\n\t\t\t\tb = 
myenv.wh.racks[r].bins_b[bn]\n\t\t\tmyenv.wh.update_stock(int(i), 10, b.location)\n\t\t\tpass # for debugging\n\n\t\ttot_dist = sum(PickRoute(myenv.wh, o).route_distance\n\t\t\t\t\t\tfor o in myenv.orders) \n\t\t\n\t\treturn tot_dist\n\n\tdef mutSwap(individual, num):\n\t\tassert isinstance(individual, Iterable), 'mutSwap individual SBE iterable'\n\t\tassert isinstance(num, int) and num >=2 and num < len(individual),\\\n\t\t\t\t'mutSwap 2nd arg SBE int in [2,len(ind)]'\n\t\tinda = individual.copy()\n\t\tf = choice(range(len(inda)), size=num, replace=False) # needed if num > 2\n\t\torig = inda[f] # from\n\t\tt = roll(f, -1) # swap locations\n\t\tinda[t] = orig # to\n\n\t\treturn Individual(shape=(myenv.NUM_BINS,), buffer=inda)\n\n\n\tdef partMatched(mom, pop):\n\t\tdad = mom\n\t\twhile dad is mom:\n\t\t\tdad = pop[randint(1, len(pop))]\n\t\tchild_mom, child_dad = np.array(mom), np.array(dad)\n\t\t\n\t\t# TBD size SBE randint(4, 2 * (NUM_BINS // 4)), an even number\n\t\t\n\t\t# at 4, 2 for mom and 2 for dad\n\t\tdests = np.random.choice(myenv.idxs, size=6, replace=False)\n\t\t\n\t\tparents = (mom, dad)\n\t\tchildren = (child_dad, child_mom)\n\t\tfor p, gene_loc in enumerate(dests):\n\t\t\ts = p + 1\n\t\t\tparent = parents[s % 1] # alternate parent\n\t\t\tchild = children[s % 1] # if parent is mom, child SBE child_dad\n\t\t\tparent_gene = parent[gene_loc]\n\t\t\tchild_gene_replaced = child[gene_loc]\n\t\t\tchild_gene_replaced_loc = myenv.INDXS[np.isin(child, parent_gene)][0]\n\t\t\tchild[[gene_loc, child_gene_replaced_loc]] = [parent_gene, child_gene_replaced]\n\t\t\t\n\t\treturn (Individual((myenv.NUM_BINS,), buffer=child_mom),\n\t\t\t\tIndividual((myenv.NUM_BINS,), buffer=child_dad))\t\n\n\t\n\tdef calcFitness(individual):\n\t\tindividual.fitness = evalFitness(individual)\n\t\t\n\t\n\t######################################################\n\t# Simulation starts here\n\t######################################################\n\t\n\n\tfor pop_no, ind in 
enumerate(myenv.pop):\n\t\tind.fitness = evalFitness(ind)\n\t\tif myenv.verbose:\n\t\t\tmsg = 'gen {:2d}: stock config {:4,d}, dist={:6,d}'.format(0, pop_no + 1, int(ind.fitness))\n\t\t\tprint(msg)\n\t\tif TESTRUN and pop_no >= 1:\n\t\t\tbreak\n\n\t\n\tif RESULTS:\n\t\td = nparray([x.fitness for x in myenv.pop])\n\t\tmsg = 'gen {:2d} {:10s}: max: {:6,d}, min: {:6,d}, mean: {:.2f}, std: {:.2f}'\\\n\t\t\t\t.format(0, 'pop', d.max(), d.min(), d.mean(), d.std())\n\t\tprint(msg)\n\n\tfor gen in range(myenv.NGENS):\n\t\tcrossed_over = []\n\n\t\tfor pop_no, ind in enumerate(myenv.pop):\n\t\t\tprobCx = random()\n\t\t\tif probCx <= myenv.CXPB:\n\t\t\t\tc1, c2 = partMatched(ind, myenv.pop)\n\t\t\t\tc1.fitness = evalFitness(c1)\n\t\t\t\tc2.fitness = evalFitness(c2)\n\t\t\t\tcrossed_over.append(c1)\n\t\t\t\tcrossed_over.append(c2)\n\t\t\t\tif myenv.verbose:\n\t\t\t\t\tmsg = 'gen {:2d}: crossover {:3,d}, dist1={:6,d}, dist2={:6,d}'.format(gen,pop_no + 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t int(c1.fitness),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t int(c2.fitness))\n\t\t\t\t\tprint(msg)\n\t\t\t\tif TESTRUN and pop_no >= 2:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\thof = myenv.pop + crossed_over\n\t\theapq.heapify(hof)\n\t\tmyenv.pop = heapq.nsmallest(myenv.NUM_BINS, hof)\n\t\t\n\t\tif RESULTS:\n\t\t\td = nparray([x.fitness for x in myenv.pop])\n\t\t\tmsg = 'gen {:2d} {:10s}: max: {:6,d}, min: {:6,d}, mean: {:.2f}, std: {:.2f}'\\\n\t\t\t\t\t.format(gen, 'crossover', d.max(), d.min(), d.mean(), d.std())\n\t\t\tprint(msg)\n\t\t\t\n\t\t# Done if no opportunity to evolve?\n\t\tif d.max() == d.min() and d.std() == 0.0:\n\t\t\tbreak\n\n\t\tmutated = []\n\n\t\tfor pop_no, ind in enumerate(myenv.pop):\n\t\t\tprobMut = random()\n\t\t\tif probMut <= myenv.MUTPB:\n\t\t\t\tc1 = mutSwap(nparray(ind), 2)\n\t\t\t\tc1.fitness = evalFitness(c1)\n\t\t\t\tif myenv.verbose:\n\t\t\t\t\tmsg = 'gen {:2d}: {:10s} {:3,d}, dist={:6,d}'\\\n\t\t\t\t\t\t\t.format(gen, 'mutate', pop_no + 1, 
int(c1.fitness))\n\t\t\t\t\tprint(msg)\n\t\t\t\tmutated.append(c1)\n\t\t\t\tif TESTRUN and pop_no >= 2:\n\t\t\t\t\tbreak\n\t\t\n\t\thof = myenv.pop + mutated\n\t\theapq.heapify(hof)\n\t\tmyenv.pop = heapq.nsmallest(myenv.NUM_BINS, hof)\n\n\t\tif RESULTS:\n\t\t\td = nparray([x.fitness for x in myenv.pop])\n\t\t\tmsg = 'gen {:2d} {:10s}: max: {:6,d}, min: {:6,d}, mean: {:.2f}, std: {:.2f}'\\\n\t\t\t\t\t.format(gen, 'mutate', d.max(), d.min(), d.mean(), d.std())\n\n\t\t\tprint(msg)\n\t\t\n\t\t# Done if no opportunity to evolve?\n\t\tif d.max() == d.min() and d.std() == 0.0:\n\t\t\tbreak\n\n\tif not DEBUG:\n\t\t_, m, d, t, y = ctime().split(' ')\n\t\twith open('pop_{}.pkl'.format(m+d+t+y), 'wb') as ofile:\n\t\t\tpickle.dump(myenv.pop, ofile)\n\t\n\tpass # for debugging\n\t\nif __name__ == \"__main__\":\n\tif TESTRUN:\n\t\timport doctest\n\t\tdoctest.testmod()\n\tif PROFILE:\n\t\timport cProfile\n\t\timport pstats\n\t\tprofile_filename = 'runWhOpt_profile.txt'\n\t\tcProfile.run('main()', profile_filename)\n\t\tstatsfile = open(\"profile_stats.txt\", \"w\")\n\t\tp = pstats.Stats(profile_filename, stream=statsfile)\n\t\tstats = p.strip_dirs().sort_stats('cumulative')\n\t\tstats.print_stats()\n\t\tstatsfile.close()\n\t\tsys.exit(0)\n\tsys.exit(main())" }, { "alpha_fraction": 0.6808510422706604, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 46, "blob_id": "1b8b18c3bcfdf08c7ac18808eface37c852e84f7", "content_id": "cabec6155fc47a43b50fa8098d469e8a801a89e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "permissive", "max_line_length": 68, "num_lines": 2, "path": "/test/__init__.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "name = \"Warehouse_tests\"\n#__all__ = [Bin, Inventory, Order, Rack, Warehouse, circular_queue]\n" }, { "alpha_fraction": 0.5883110165596008, "alphanum_fraction": 0.5950051546096802, "avg_line_length": 
33.07017517089844, "blob_id": "180abcf5ad5737264ce3dac90634101e0830dd3f", "content_id": "2351e35070abe21a498d3eeaccabe429d3084e97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3884, "license_type": "permissive", "max_line_length": 130, "num_lines": 114, "path": "/Warehouse/Warehouse.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\nfrom Warehouse.Rack import Rack\n\nclass Warehouse:\n '''\nA warehouse has inventory, racks, and a dock. Racks contain bins. Warehouse assigns a\nlat / long to bins to allow computation of distance traveled from one bin to another.\n\nThe init args are:\n racks, int > 0 for how many racks are in warehouse\n bins, int > 0 for how many bins are in each rack\n \nAssumptions:\n coordinates of warehouse's lower-left corner is (0, 0)\n dock is single location with lat == 0 and long centered WRT number of racks \n bins_a[1] and bins_[1] are for bin 1\n shortest route from one bin to another goes through the rack end (cap) closest to the destination bin\n'''\n\n __instance = None\n __inventory = Inventory()\n\n @classmethod\n def clear(cls):\n Inventory.clear()\n\n @classmethod\n def reset(cls, racks, bins):\n cls.__instance = None # clear __instance so __init__ doen't fail\n cls.__instance = Warehouse(racks=racks, bins=bins) # Now the initialization can be called\n Inventory.clear()\n return cls.__instance\n\n def __init__(self, racks=None, bins=None):\n\n if type(self).__instance is None:\n # Initialization\n assert isinstance(racks, int) and racks > 0, \\\n 'number of racks must be int > 0'\n assert isinstance(bins, int) and bins > 0, \\\n 'number of bins must be int > 0'\n\n type(self).__instance = self\n\n dock_rack = round((racks / 2) + .1) # friggin \"Banker's rounding!\n self.__dock = Bin(rack_no=dock_rack, side='a', bin_no=0)\n self.__dock.nearest_cap = 
self.__dock\n self.__dock.nearest_cap_distance = 0\n self.__racks_bins = (racks, bins)\n self.__racks = [Rack(rack_no = x, bin_count=bins) \n for x in range(1, racks + 1, 1)]\n self.__bins = [list(r.bins_a.values()) + list(r.bins_b.values())\n for r in self.__racks][0]\n else:\n self.__dict__ = Warehouse.__instance.__dict__\n\n def __repr__(self):\n return '''racks: {}, dock (lat, long): {}, inventory has {:,d} item_no, {:,d} quantities'''\\\n .format(len(self.__racks), (self.__dock.lat, self.__dock.long),\n len(Warehouse.__inventory.stock),\n sum(b.count for b in Bin.bin_locations.values()))\n \n\n @classmethod\n def update_stock(cls, item_no, qty, location):\n '''\n Use Inventory.update_stock if called w/o location or with location == None so qty will be divided among bins holding item\n Use Inventory.update_bin if called with location\n'''\n assert isinstance(location, Bin.Bin_Location), 'update_stock SBE called with nlocation = None or instance of Bin_Location'\n cls.__inventory.update_bin(location, item_no, qty)\n\n\n def get_stock_qty(self, item_no):\n return Warehouse.__inventory.get_stock_qty(item_no)\n\n\n @property\n def racks(self):\n return self.__racks\n\n @racks.setter\n def racks(self, *args):\n assert 1 == 0, 'racks attribute set by init only'\n return\n\n @property\n def racks_bins(self):\n return self.__racks_bins\n\n @racks_bins.setter\n def racks_bins(self, args):\n assert 0 == 1, 'number of racks and bins fixed at instantiation'\n\n @property\n def dock(self):\n return self.__dock\n\n @dock.setter\n def dock(self, *arg):\n assert 1 == 0, 'dock lat/long set by init only'\n\n @property\n def stock(self):\n return Warehouse.__inventory.stock\n \n @property\n def inventory(self):\n return self.__inventory\n \n @inventory.setter\n def inventory(self, args):\n raise RuntimeError('inventory attribute set by init')\n" }, { "alpha_fraction": 0.5343511700630188, "alphanum_fraction": 0.5558639764785767, "avg_line_length": 23.03333282470703, "blob_id": 
"357a4f89594c931c1bec3d98e6d6e56379c56544", "content_id": "951255d0697718f6c5c31ead860f6c7b9d35be90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1441, "license_type": "permissive", "max_line_length": 127, "num_lines": 60, "path": "/test/test_order.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 28, 2019\n\n@author: bren\n'''\nimport imp\nimport sys\nimport unittest\nimport Warehouse\nfrom Warehouse.Order import Order\n\n\nclass TestOrder(unittest.TestCase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def testInit(self):\n Order.clear()\n\n o1 = Order()\n self.assertEqual(o1.order_no, 1, 'first order SBE order_no 1, is {}'.format(o1.order_no))\n o2 = Order()\n self.assertEqual(o2.order_no, 2, 'first order SBE order_no 2, is {}'.format(o2.order_no))\n\n\n def testAddLine(self):\n # add lines - line number should increment\n Order.clear()\n\n o = Order()\n o.add_line(item_no=1, qty=50)\n o.add_line(item_no=2, qty=73)\n lines = o.lines\n self.assertEqual(len(lines), 2, 'SBE 2 lines in order, is {}'.format(len(lines)))\n for ln, (l, itmn_q) in enumerate(zip(lines, ((1, 50), (2, 73)))):\n self.assertEqual((l.item_no, l.qty), itmn_q, 'line_no {} - item_no, qty SBE {}, is {}'.format(ln, itmn_q, itmn_q))\n\n\n def testException(self):\n o = Order()\n \n try:\n o.lines = []\n raise ValueError('trying to set lines should raise exception')\n except ValueError as e:\n print(e)\n sys.exit(-1)\n except Exception:\n pass\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" }, { "alpha_fraction": 0.642730176448822, "alphanum_fraction": 0.6469960808753967, "avg_line_length": 31.662790298461914, "blob_id": "50620ed2caaa1cfab10b965bb69fd5ea276b3829", "content_id": "00109516c47ac23f0feeb01cc535a0141f5d2826", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 2813, "license_type": "permissive", "max_line_length": 81, "num_lines": 86, "path": "/Warehouse/Individual.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from collections.abc import Iterable\nimport numpy as np\n\n\nclass Individual(np.ndarray):\n\t'''\n from the numpy documentation, we subclass ndarray\nto add a 'fitness' attribute\n'''\n\t__last_pop_id = 1\n\t__default_fitness = 1e10\n\t\n\tdef __new__(cls, shape, dtype=int, buffer=None, offset=0,\n\t\t strides=None, order=None, pop_id=None, fitness=int(__default_fitness)):\n\t\t# Create the ndarray instance of our type, given the usual\n\t\t# ndarray input arguments. This will call the standard\n\t\t# ndarray constructor, but return an object of our type.\n\t\t# It also triggers a call to Individual.__array_finalize__\n\t\tif buffer is None:\n\t\t\tbuffer = np.array([x for x in range(1, 1 + shape[0])])\n\t\t\tnp.random.shuffle(buffer)\n\t\tobj = super(Individual, cls).__new__(cls, shape, dtype,\n\t\t buffer, offset, strides,\n\t\t order)\n\t\t# set the new 'info' attribute to the value passed - default 0\n\t\tobj.fitness = fitness\n\t\tif pop_id is None:\n\t\t\tobj.pop_id = Individual.__last_pop_id\n\t\t\tIndividual.__last_pop_id += 1\n\t\telse:\n\t\t\tobj.pop_id = pop_id\n\t\t\t\n\t\t# Finally, we must return the newly created object:\n\t\treturn obj\n\t\n\tdef __array_finalize__(self, obj):\n\t\t# ``self`` is a new object resulting from\n\t\t# ndarray.__new__(Individual, ...), therefore it only has\n\t\t# attributes that the ndarray.__new__ constructor gave it -\n\t\t# i.e. those of a standard ndarray.\n\t\t#\n\t\t# We could have got to the ndarray.__new__ call in 3 ways:\n\t\t# From an explicit constructor - e.g. 
Individual():\n\t\t# obj is None\n\t\t# (we're in the middle of the Individual.__new__\n\t\t# constructor, and self.info will be set when we return to\n\t\t# Individual.__new__)\n\t\tif obj is None:\n\t\t\treturn\n\t\t# From view casting - e.g arr.view(Individual):\n\t\t# obj is arr\n\t\t# (type(obj) can be Individual)\n\t\t# From new-from-template - e.g infoarr[:3]\n\t\t# type(obj) is Individual\n\t\t#\n\t\t# Note that it is here, rather than in the __new__ method,\n\t\t# that we set the default value for 'info', because this\n\t\t# method sees all creation of default objects - with the\n\t\t# Individual.__new__ constructor, but also with\n\t\t# arr.view(Individual).\n\t\tself.fitness = getattr(obj, 'fitness', None)\n\t\t# We do not need to return anything\n\t\t\n\tdef __lt__(self, other):\n\t\tif self.fitness < other.fitness:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\n\tdef __eq__(self, other):\n\t\tif isinstance(other, type(self)) and self.fitness == other.fitness:\n\t\t\treturn True\n\t\telif isinstance(other, Iterable):\n\t\t\treturn np.all(np.array(self) == other)\n\t\telse:\n\t\t\treturn False\n\t\t\n\tdef __getitem__(self, idx):\n\t\treturn super(type(self), self).__getitem__(idx)\n\t\n\tdef __setitem__(self, var):\n\t\traise RuntimeError('Individual is immutable')\n\n\t@classmethod\n\tdef default_fitness(cls):\n\t\treturn cls.__default_fitness\n\t\n\t\n" }, { "alpha_fraction": 0.602173924446106, "alphanum_fraction": 0.6069565415382385, "avg_line_length": 21.320388793945312, "blob_id": "87aad7d535ea4d0e16b0e0ef23fc964c2b1c2e3b", "content_id": "e7a9227513c28e99f3d1c7efc08c11a2fc75e57b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2300, "license_type": "permissive", "max_line_length": 100, "num_lines": 103, "path": "/Warehouse/Order.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from Warehouse.Inventory import Inventory\nfrom builtins 
import property\n\nclass Order:\n\t\"\"\"\n\tOrder contains order_lines which contain order_items\n\t\n\tAssumptions:\n\t\tIt is permissable to add items to order that are not in Inventory\n\"\"\"\n\n\tclass OrderItem:\n\t\t'''\n\t\tOrderItem is class for contents of order line item\n'''\n\t\tdef __init__(self, line_no, item_no, qty, status='ordered'):\n\t\t\tself.__line_no = line_no \n\t\t\tself.__item_no = item_no \n\t\t\tself.__qty = qty \n\t\t\tself.__status = status \n\t\t\n\t\tdef __repr__(self):\n\t\t\treturn 'line_no: {}, item_no: {}, qty: {}, line status: {}'.format(self.line_no,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.item_no,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.qty,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.status)\n\t\t\t\n\t\t@property\n\t\tdef line_no(self):\n\t\t\treturn self.__line_no\n\t\t\n\t\t@property\n\t\tdef item_no(self):\n\t\t\treturn self.__item_no\n\t\t\n\t\t@property\n\t\tdef qty(self):\n\t\t\treturn self.__qty\n\t\t\n\t\t@property\n\t\tdef status(self):\n\t\t\treturn self.__status\n\t\t\n\t\[email protected] \n\t\tdef status(self, stat):\n\t\t\tself.__status = stat\n\t\t\t\n\n\t##############################\n\t# Order class variabiles\n\t\n\t__last_order_no = 0\n\t\n\t__inventory = Inventory()\n\t\n\t\n\t@classmethod\n\tdef clear(cls):\n\t\t'''\n\t\tclear needed to keep unit tests independent\n'''\n\t\tcls.__last_order_no = 0\n\t\tpass # for debugging\n\t\n\t@classmethod\n\tdef last_order_no(cls):\n\t\treturn cls.__last_order_no\n\n\n\tdef __init__(self):\n\t\tOrder.__last_order_no += 1\n\t\tself.__order_no__ = 0 + Order.__last_order_no\n\t\tself.__lines = []\n\t\tself.__last_line_no = 0\n\n\n\tdef add_line(self, item_no=None, qty=None):\n\t\tassert item_no is not None,\\\n\t\t\t'Order Line SBE instantiated with item_no and qty'\n\t\tassert isinstance(item_no, int) and item_no > 0,\\\n\t\t\t'item_no must be int > 0, is {}'.format(item_no)\n\t\tassert isinstance(qty, int) and qty > 0,\\\n\t\t\t'qty must be int > 0, is 
{}'.format(qty)\n\t\t\n\t\t# not sure I'm ready for this assert:\n\t\t# assert item_no in Order.__inventory, 'attemted to add item not in inventory: {}'.format(item_no)\n\t\t\n\t\tself.__last_line_no += 1\n\t\tline = Order.OrderItem(0 + self.__last_line_no, item_no, qty, 'ordered')\n\t\tself.__lines.append((line))\n\t\t\n\n\tdef __repr__(self):\n\t\treturn('order_no: {}, {} lines'.format(self.__order_no__,\n\t\t\t\t\t\t\t\t\t\t\t\tlen(self.__lines)))\n\n\t@property\n\tdef order_no(self):\n\t\treturn self.__order_no__\n\n\t@property\n\tdef lines(self):\n\t\treturn self.__lines\n\n" }, { "alpha_fraction": 0.8366013169288635, "alphanum_fraction": 0.8366013169288635, "avg_line_length": 37.25, "blob_id": "211b5baead33f808d9688feae58234bc2bf8d1b1", "content_id": "3f7c9ce2934ce43d3f9d6b9a8bfc08f907471d32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "permissive", "max_line_length": 87, "num_lines": 4, "path": "/README.md", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "# warehouse_optimization\nUsing genetic algorithm to minimize distance pickers travel to pick orders in warehouse\n\nsee test/test_pickroute.py for example\n" }, { "alpha_fraction": 0.5760869383811951, "alphanum_fraction": 0.5978260636329651, "avg_line_length": 29.66666603088379, "blob_id": "d364cb856e2fbb8c4a3331dfd4ec9eabf155cab5", "content_id": "6cde5a95e98afc20ac2c8d1dcbe32dd5e67fa386", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/Warehouse/__init__.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "name = \"Warehouse\"\n__version__ = '1.0'\n#__all__ = [Bin, Inventory, Order, Rack, Warehouse]\n" }, { "alpha_fraction": 0.5148418545722961, "alphanum_fraction": 0.5287104845046997, 
"avg_line_length": 34.66086959838867, "blob_id": "e3afdfc84f425397ccaddb45a34054cc78ffb803", "content_id": "71da5c4e0db9db6056cfccad867f60383452fb07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4110, "license_type": "permissive", "max_line_length": 117, "num_lines": 115, "path": "/test/test_inventory.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 27, 2019\n\n@author: bren\n'''\nimport sys\nimport unittest\nfrom Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\n\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def testSingleton(self):\n i = Inventory()\n i.clear()\n i2 = Inventory()\n self.assertIs(i.stock, i2.stock, 'singleton test fails')\n self.assertEqual(len(i.stock.keys()), 0, 'inventory SBE empty, found {} items'.format(len(i.stock.keys())))\n \n\n def testUpdateStock(self):\n r_no = 3\n i_no = 3\n location = Bin.Bin_Location(rack_no=r_no, side='a', bin_no=1)\n i = Inventory()\n i.clear()\n i.update_bin(item_no=i_no, location=location, qty=10)\n self.assertEqual(Inventory.__repr__(Inventory()), str(Inventory()), '__repr__ != __string__')\n self.assertEqual(len(i.stock), 1, 'inventory SBE empty, found {} items'\\\n .format(len(i.stock)))\n\n\n def testAddItem(self):\n b = Bin(rack_no=1, side='b', bin_no=2)\n itm_no = 3\n qty = 30\n i = Inventory()\n i.clear()\n i.update_bin(location=b.location, item_no=itm_no, qty=qty)\n self.assertEqual(i.get_stock_qty(location=b.location), (itm_no, qty), \\\n \"item {} at {} SBE qty {}, is {}\".format(itm_no, qty, b.location, \n i.get_stock_qty(location=b.location)))\n b = Bin.get_bin_by_location(b.location)\n self.assertEqual(b.item, itm_no, \\\n 'location ({} item/qty SBE {}, is {}'.format(b.location, (itm_no, qty),\n (b.item, b.count)))\n\n\n def testSingletonDefaultDict(self):\n i = Inventory()\n i.clear()\n i2 = 
Inventory()\n self.assertIs(i.stock, i2.stock, 'singleton test fails')\n self.assertEqual(len(i.stock), 0, 'inventory SBE empty, found {} items'\\\n .format(i.stock.keys()))\n \n\n def testUpdateStockSeparateSides(self):\n b = Bin(rack_no=1, side='a', bin_no=1)\n b.__count = 0\n i = Inventory()\n i.clear()\n Inventory.update_bin(location=b.location, item_no=1, qty=10)\n self.assertEqual(Inventory.__repr__(Inventory()), str(Inventory()), '__repr__ != __string__')\n self.assertEqual(len(i.stock), 1, 'inventory SBE empty, found {} items'.format(len(i.stock)))\n self.assertEqual(Bin.get_bin_by_location(b.location).count, 10, 'location ({} item SBE 10, is {}'\\\n .format(b.location, Bin.get_bin_by_location(b.location)))\n\n \n b = Bin(rack_no=1, side='b', bin_no=2)\n location = b.location\n i.update_bin(location=location, item_no=1, qty=20)\n (itm, q) = i.get_stock_qty(location=location)\n self.assertEqual(itm, 1, 'item at location {} SBE 1, is {}'.format(location, itm))\n self.assertEqual(q, 20, \"item {} at {} SBE qty 20, is {}\".format(1, location, q))\n \n \n def testExceptions(self):\n i = Inventory()\n i.clear()\n try:\n i.stock = dict()\n raise ValueError('stock rack_no should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n \n def testReprStr(self):\n i = Inventory()\n r = i.__repr__()\n s = i.__str__()\n self.assertEqual(r, s, 'repr \"{}\" not eq str \"{}\"'.format(r, s))\n \n \n def testClear(self):\n b = Bin(rack_no=1, side='a', bin_no=1)\n b.__count = 0\n i = Inventory()\n i.clear()\n Inventory.update_bin(location=b.location, item_no=1, qty=10)\n self.assertEqual(len(i.stock), 1, 'SBE only 1 item in inventory stock, is {}'.format(len(i.stock)))\n i.clear()\n self.assertEqual(len(i.stock), 0, 'SBE no1 items in inventory stock after clear, is {}'.format(len(i.stock)))\n \n" }, { "alpha_fraction": 0.4365842044353485, "alphanum_fraction": 0.45087677240371704, "avg_line_length": 44.50273132324219, 
"blob_id": "7bb96d99cf90cac0caaa9d3c30888c68106de0e8", "content_id": "248168f103e902501bc17cbaf05636b883653fc4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8326, "license_type": "permissive", "max_line_length": 128, "num_lines": 183, "path": "/test/test_Rack.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 27, 2019\n\n@author: bren\n'''\nimport sys\nimport unittest\nfrom Warehouse.Rack import Rack\nfrom Warehouse.Inventory import Inventory\n\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def testInit(self):\n r = Rack(rack_no=1, bin_count=5)\n self.assertEqual(r.bin_count, 5, 'bin_count SBE 5, is {}'.format(r.bin_count))\n self.assertEqual(r.rack_no, 1, 'rack_no SBE 1, is {}'.format(r.rack_no))\n self.assertEqual(len(r.bins_a), 5, 'side a SBE 5 bins, is {}'.format(len(r.bins_a)))\n self.assertEqual(len(r.bins_b), 5, 'side b SBE 5 bins, is {}'.format(len(r.bins_b)))\n\n b = r.bins_a[1]\n self.assertEqual(b.bin_no, 1, 'bin_no SBE 1, is {}'.format(b.bin_no))\n self.assertIsNone(b.item, 'item SBE None, is {}'.format(b.item))\n self.assertEqual(b.count, 0, 'count SBE 0, is {}'.format(b.count))\n self.assertEqual(b.lat, b.bin_no + 1, 'lat SBE {}, is {}'.format(b.bin_no + 1, b.lat))\n self.assertEqual(b.bin_side, 'a', 'bin_side SBE \"a\", is {}'.format(b.bin_side))\n self.assertEqual(b.rack_no, 1, 'rack_no SBE \"a\", is {}'.format(b.rack_no))\n self.assertEqual(b.long, 1, 'long SBE 1, is {}'.format((r.rack_no * 3) - 2))\n for bins, side in zip((r.bins_a, r.bins_b), list('ab')):\n for b in bins.values():\n self.assertIsNone(b.item, 'bin,side {} item SBE None, is {}'.format((b.bin_no, b.bin_side), b.item))\n self.assertEqual(b.count, 0, 'bin,side {} count SBE 0, is {}'.format((b.bin_no, b.bin_side), b.count))\n\n\n\n def testNearestCap(self):\n r = Rack(rack_no=1, bin_count=5)\n \n b = 
r.bins_a[1]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (0, 1), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_a[2]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (0, 1), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_a[4]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (7, 1), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_a[5]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (7, 1), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n\n b = r.bins_b[1]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (0, 2), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_b[2]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (0, 2), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_b[4]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (7, 2), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n b = r.bins_b[5]\n nc = r.nearest_cap(b)\n self.assertEqual((nc.lat, nc.long), (7, 2), 'nearest cap for bin {} SBE (7, 1), is {}'\\\n .format(b.location, (nc.lat, nc.long)))\n \n \n \n def testExceptions(self):\n r = Rack(rack_no=1, bin_count=5)\n try:\n r.rack_no = 2\n raise ValueError('setting rack_no should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n\n except Exception as e:\n pass\n try:\n r.bin_count = 2\n raise ValueError('setting bin_count should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n try:\n r.bins_a = 2\n raise ValueError('setting bins_a should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n 
try:\n r.bins_b = 2\n raise ValueError('setting bins_b should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n try:\n r.add_bin(None)\n raise ValueError('adding bin to rack after init should have raised exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n\n def testRackBin(self):\n r = Rack(rack_no=1, bin_count=5)\n ba = r.bins_a[1]\n self.assertEqual(ba.bin_no, 1, 'bins_a.bin[1].bin_no SBE 1, is {}'.format(ba.bin_no))\n self.assertEqual(ba.bin_side, 'a', 'bins_a.bin[1].bin_side SBE \"a\", is {}'.format(ba.bin_side))\n nc = r.nearest_cap(ba)\n self.assertEqual((nc.lat, nc.long), (0, 1),\n 'nearest cap lat/log for bins_a.bin[1] SBE (0, 1), is {}'.format((nc.lat, nc.long)))\n \n bb = r.bins_b[5]\n self.assertEqual(bb.bin_no, 5, 'bins_b.bin[5].bin_no SBE 5, is {}'.format(ba.bin_no))\n self.assertEqual(bb.bin_side, 'b', 'bins_b.bin[5].bin_side SBE \"a\", is {}'.format(ba.bin_side))\n nc = r.nearest_cap(bb)\n self.assertEqual((nc.lat, nc.long), (7, 2),\n 'nearest cap lat/log for bins_a.bin[1] SBE (7, 1), is {}'.format((nc.lat, nc.long)))\n \n def testStockingRackBins(self):\n Inventory.clear()\n\n r = Rack(rack_no=1, bin_count=5)\n for b in r.bins_a.values():\n b.stock_bin(item_no=b.bin_no, item_count=b.bin_no * 10)\n for b in r.bins_b.values():\n b.stock_bin(item_no=b.bin_no, item_count=b.bin_no * 20)\n \n for b in r.bins_a.values():\n self.assertEqual(b.item, b.bin_no, 'bin @ {} item SBE {}, is {}'.format(b.location,\n b.bin_no,\n b.item))\n self.assertEqual(b.count, b.bin_no * 10, 'bin @ {} count SBE {}, is {}'.format(b.location,\n b.bin_no * 10,\n b.count))\n for b in r.bins_b.values():\n self.assertEqual(b.item, b.bin_no, 'bin @ {} item SBE {}, is {}'.format(b.location,\n b.bin_no,\n b.item))\n self.assertEqual(b.count, b.bin_no * 20, 'bin @ {} count SBE {}, is {}'.format(b.location,\n b.bin_no * 20,\n b.count))\n inv = Inventory()\n # 
Inventory.__stock[item_no][location]\n for b in r.bins_a.values():\n (i, q) = inv.get_stock_qty(location=b.location)\n self.assertEqual((i, q), (b.item, b.count), 'inventory item/qty for bin @ {} item SBE {}, is {}'.format(b.location,\n (b.item, b.count),\n (i, q)))\n for b in r.bins_b.values():\n (i, q) = inv.get_stock_qty(location=b.location)\n self.assertEqual((i, q), (b.item, b.count), 'inventory qty for bin @ {} item SBE {}, is {}'.format(b.location,\n (b.item, b.count),\n (i, q)))\n \n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" }, { "alpha_fraction": 0.452472448348999, "alphanum_fraction": 0.47296950221061707, "avg_line_length": 32.084747314453125, "blob_id": "d36e9b189396099d149f3bff512dbf001ae78602", "content_id": "64b016c43d0b3a5d776f89029550169c585e5053", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3903, "license_type": "permissive", "max_line_length": 115, "num_lines": 118, "path": "/test/test_warehouse.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 28, 2019\n\n@author: bren\n'''\nfrom itertools import product\nimport sys\nimport unittest\n\nfrom Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\nfrom Warehouse.Warehouse import Warehouse\n\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def testInit(self):\n Warehouse.clear()\n w = Warehouse(5, 5)\n self.assertEqual(len(w.racks), 5, 'w should have 5 racks, is {}'.format(w.racks))\n self.assertEqual(w.racks_bins, (5, 5), 'racks_bin SBE (5, 50, is {}'.format(w.racks_bins))\n self.assertEqual((w.dock.lat, w.dock.long), (0, 7), \\\n 'dock_lat_long SBE (0, 7), is {}'.format((w.dock.lat,\n w.dock.long)))\n self.assertEqual(w.__repr__(), 'racks: 5, dock (lat, long): (0, 7), inventory has 0 item_no, 0 quantities',\n 'repr should not be 
\"{}\"'.format(w.__repr__()))\n \n \n def testUpdateStockSingle(self):\n Warehouse.clear()\n w = Warehouse(5, 5)\n b = w.racks[0].bins_a[1]\n w.update_stock(1, 40, b.location)\n _, q = w.get_stock_qty(1)\n assert q == 40, 'item 1 qty SBE {} but is {}'.format(40, q)\n \n \n \n def testExceptions(self):\n Warehouse.clear()\n w = Warehouse(5, 5)\n\n try:\n # confirm 2nd instantiation w/o clear() raises exception\n Warehouse(2, 2)\n raise RuntimeError('second and subsequent instantiations of warehouse should raise exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n \n # these should raise exceptions\n try:\n w.racks = 1\n raise RuntimeError('setting racks should raise exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n\n try:\n w.dock_lat_long = (0,0)\n raise RuntimeError('setting dock_lat_long should raise exception')\n except ValueError as v:\n print(v)\n sys.exit(-1)\n except Exception as e:\n pass\n\n\n def testReset(self):\n Warehouse.clear()\n # New warehouse configuration with reset() that combines clear() and init(racks, bins)\n try:\n # test reset allowing warehouse reconfiguration\n w = Warehouse.reset(5, 5)\n self.assertEqual(len(w.racks), 5, 'racks SBE 5 but is {}'.format(len(w.racks)))\n self.assertEqual((w.dock.lat, w.dock.long), (0, 7), \\\n 'dock lat/long SBE (0, 7), is {}'.format((w.dock.lat,\n w.dock.long)))\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n\n def testStockingRackBins(self):\n Inventory.clear()\n Warehouse.clear()\n wh = Warehouse(5, 5)\n\n for i, (r, s, bn) in enumerate(product(range(1, 5 + 1, 1),\n list('ab'),\n range(1, 5 + 1, 1))):\n if s == 'a':\n b = wh.racks[r - 1].bins_a[bn]\n else: # s == 'b'\n b = wh.racks[r - 1].bins_b[bn]\n wh.update_stock(i + 1, (i + 1) * 10, b.location)\n _, q = wh.get_stock_qty(i + 1)\n self.assertEqual(q, (i + 1) * 10, 'inventory qty for item {} item SBE {}, is {}'.format(i,\n (i + 1) * 10,\n q))\n\n\nif 
__name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testInit']\n unittest.main()" }, { "alpha_fraction": 0.4758879542350769, "alphanum_fraction": 0.49566850066185, "avg_line_length": 34.09693908691406, "blob_id": "50838206114f0a3e516555e4490ad4a34c2d38aa", "content_id": "8ed7a587f29b553e6c0640e1ae6a8ac6d40d938d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6926, "license_type": "permissive", "max_line_length": 124, "num_lines": 196, "path": "/test/test_bin.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 27, 2019\n\n@author: bren\n'''\nimport sys\nimport unittest\n\nfrom Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n Inventory.clear()\n\n\n def tearDown(self):\n pass\n\n def testInit(self):\n b = Bin(rack_no=1, side='b', bin_no=3)\n self.assertEqual(b.rack_no, 1, 'rack_no SBE 1, is {}'.format(b.rack_no))\n self.assertEqual(b.bin_no, 3, 'bin_no SBE 3, is {}'.format(b.bin_no))\n self.assertEqual(b.bin_side, 'b', 'bin_no SBE \"b\", is {}'.format(b.bin_side))\n self.assertEqual(b.location, Bin.Bin_Location(1, 'b', 3), 'bin_no SBE {}, is {}'.format(Bin.Bin_Location(1, 'b', 3),\n b.location))\n self.assertEqual(b.lat, 4, 'bin_no SBE 4, is {}'.format(b.lat))\n self.assertEqual(b.long, 2, 'bin_no SBE 2, is {}'.format(b.long))\n self.assertIsNone(b.item, 'item SBE None, is {}'.format(b.item))\n self.assertEqual(b.count, 0, 'bin_no SBE 0, is {}'.format(b.count))\n \n\n\n def testInitMissing(self):\n # confirm argument testing\n try:\n t = 'rack_no'\n _ = Bin(side='a', bin_no=1, item=1, count=10)\n raise ValueError('missing {} SBE exception'.format(t))\n except ValueError:\n print(t)\n sys.exit(-1)\n except Exception:\n pass\n try:\n t = 'bin_no'\n _ = Bin(rack_no=1, side='a', item=1, count=10)\n raise ValueError('missing {} SBE exception'.format(t))\n except 
ValueError:\n print(t)\n sys.exit(-1)\n except Exception:\n pass\n try:\n t = 'side'\n _ = Bin(rack_no=1, bin_no=1, item=1, count=10)\n raise ValueError('missing {} SBE exception'.format(t))\n except ValueError:\n print(t)\n sys.exit(-1)\n except Exception:\n pass\n\n\n def testGetBinByLocation(self): \n # confirm initialization of attributes\n b = Bin(rack_no=1, side='b', bin_no=5, item=2, count=10)\n location = b.location\n b = Bin.get_bin_by_location(location)\n self.assertEqual( (1, 'b', 5), (location.rack, location.side, location.bin_no),\\\n 'bin rack_no, side, bin_no SBE (1, \"b\", 5), is {}'\\\n .format((location.rack, location.side, location.bin_no)))\n self.assertEqual((6, 2), (b.lat, b.long), \\\n 'bin lat/long SBE {}, is{}'.format((2, 0), (b.lat, b.long)))\n \n b = Bin(rack_no=1, side='a', bin_no=1, item=1, count=10)\n location = b.location\n b = Bin.get_bin_by_location(location)\n self.assertEqual((1, 'a', 1), (location.rack, location.side, location.bin_no),\\\n 'bin rack_no, side, bin_no SBE (1, \"a\", a), is {}'\\\n .format((location.rack, location.side, location.bin_no)))\n self.assertEqual((2, 1), (b.lat, b.long), \\\n 'bin lat/long SBE {}, is{}'.format((2, 0), (b.lat, b.long)))\n\n b = Bin.get_bin_by_location(location)\n b.stock_bin(item_no=2, item_count=30) # removes prior item (1) replacing with item 2\n self.assertEqual((2, 1, 1, 'a', 30), (b.item, \n b.bin_no, b.rack_no, b.bin_side,\n b.count),\\\n 'item and count SBE (1, 30), is {}'.format((b.item, b.count)))\n\n \n def testExceptions(self):\n b = Bin(rack_no=1, side='a', bin_no=1,\n item=1, count=10)\n \n try:\n b.rack_no = 2\n raise ValueError('trying to set rack_no should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.bin_no = 2\n raise ValueError('trying to set bin_no should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.bin_side = 'b'\n raise ValueError('trying to set bin_side 
should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.location = (1, 'a', 1)\n raise ValueError('trying to set location should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.lat = (1, 'a', 1)\n raise ValueError('trying to set lat should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.long = (1, 'a', 1)\n raise ValueError('trying to set long should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.item = (1, 'a', 1)\n raise ValueError('trying to set item should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n try:\n b.count = (1, 'a', 1)\n raise ValueError('trying to set count should raise exception')\n except ValueError as ve:\n print(ve)\n sys.exit(-1)\n except:\n pass\n \n\n def testBinLocation(self):\n bl1 = Bin.Bin_Location(1, 'a', 1)\n bl2 = Bin.Bin_Location(1, 'a', 1)\n self.assertEqual(bl1, bl2, '{} SBE equal {}'.format(bl1, bl2))\n bl2 = Bin.Bin_Location(1, 'a', 2)\n self.assertNotEqual(bl1, bl2, '{} SBE not equal {}'.format(bl1, bl2))\n self.assertLess(bl1, bl2, '{} SBE LT {}'.format(bl1, bl2))\n \n \n def testDropBin(self):\n # del Bin.__bin_locations[location]\n Bin.clear()\n self.assertEqual(len(Bin.bin_locations), 0, 'clear method should reset class list of prior Bin instances')\n bl1 = Bin(rack_no=1, side='a', bin_no=1)\n bl2 = Bin(rack_no=1, side='a', bin_no=2)\n self.assertEqual(len(Bin.bin_locations), 2, \n 'class list of Bin instances SBE len 2, is {}'\\\n .format(len(Bin.bin_locations)))\n Bin.drop_bin(bl1.location)\n self.assertEqual(len(Bin.bin_locations), 1, \n 'class list of Bin instances SBE len 1 after drop_bin, is {}'\\\n .format(len(Bin.bin_locations)))\n self.assertEqual(bl2, Bin.get_bin_by_location(bl2.location), \n 'bin at {} should still be in bin instance list after 
drop'\\\n .format(bl2.location))\n \n \n " }, { "alpha_fraction": 0.6525111794471741, "alphanum_fraction": 0.6569710969924927, "avg_line_length": 26.566844940185547, "blob_id": "bd3549e299b00bde182e85e675f0e9a4b19f4445", "content_id": "206d958f78ae28b691cddfd001680da46a9274e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5157, "license_type": "permissive", "max_line_length": 90, "num_lines": 187, "path": "/Warehouse/PickRoute.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 28, 2019\n\n@author: bren\n'''\nfrom builtins import isinstance\nfrom collections import OrderedDict\nfrom functools import lru_cache\nimport numpy as np\nfrom Warehouse.Bin import Bin\nfrom Warehouse.Order import Order\n\nclass PickRoute:\n\t'''\nclassdocs\n'''\n\t\n\t@lru_cache(maxsize=5000)\n\tdef __bin_to_bin_distance(self, from_bin, to_bin):\n\t\t'''Manhattan distance, not Euclidian\n\t\tcall with two Bin class instances\n\t'''\n\t\track_diff = to_bin.rack_no - from_bin.rack_no\n\t\tif (rack_diff == 0 and from_bin.bin_side == to_bin.bin_side) or\\\n\t\t (rack_diff == 1 and (from_bin.bin_side, to_bin.bin_side) == ('b', 'a')):\n\t\t\treturn np.abs(from_bin.lat_long - to_bin.lat_long).sum()\n\t\t\n\t\tif to_bin.nearest_cap_distance < from_bin.nearest_cap_distance:\n\t\t\tnc_bin = to_bin.nearest_cap\n\t\telse:\n\t\t\tnc_bin = from_bin.nearest_cap \n\t\t#f_2_t = np.hstack([from_bin.lat_long, nc_bin.lat_long, to_bin.lat_long]).reshape(3, 2)\n\t\treturn np.abs(from_bin.lat_long - nc_bin.lat_long, \n\t\t\t\t\t nc_bin.lat_long - to_bin.lat_long).sum()\n\t\n\n\tdef __init__(self, wh, order):\n\t\t'''\n\t\tConstructor\n\t\t'''\n\t\tassert isinstance(order, Order), 'order must be instance of Order'\n\t\tself.__order = order\n\t\tself.__order_lines = order.lines\n\n\t\tself.warehouse = wh # pick up singleton\n\t\t\n\t\tself.__pick_bins = set()\n\t\tself.__pick_items = 
set()\n\t\tfor line in self.order_lines:\n\t\t\titem_locations = self.warehouse.stock[line.item_no]\n\t\t\tif len(item_locations) == 0:\n\t\t\t\t# silently ignore items not in inventory\n\t\t\t\t# but monkey-patch note that item not in inventory\n\t\t\t\tline.status = 'not in inventory'\n\t\t\telse:\n\t\t\t\tself.__pick_items.add(line.item_no)\n\t\t\t\tfor item_location in item_locations:\n\t\t\t\t\tself.__pick_bins.add(Bin.get_bin_by_location(item_location))\n\n\t\tself.__route = OrderedDict()\n\t\tself.__route_distance = 0\n\t\t\n\t\tself.__calc_route()\n\t\t\n\n\tdef bin_to_bin_distance(self, from_bin, to_bin):\n\t\treturn self.__bin_to_bin_distance(from_bin, to_bin)\n\n\n\tdef __calc_route(self):\n\t\tpick_items = self.__pick_items.copy()\n\t\tpick_bins = sorted(self.__pick_bins.copy())\n\t\t\n\t\t# first one starts from dock, ends on nearest to first rack, side a, first bin\n\t\tto_bin = pick_bins[0]\n\t\tfrom_bin = self.wh.dock\n\t\tself.__route_distance += int(abs(from_bin.lat_long - to_bin.lat_long).sum())\n\t\tself.__route[(from_bin.location, to_bin.location)] = self.__route_distance\n\t\tpick_items.remove(to_bin.item)\n\t\tpick_bins.remove(to_bin)\n\t\tfrom_bin = to_bin\n\t\t# pick closest to \n\t\twhile len(pick_items) > 0 and len(pick_bins) > 0:\n\t\t\tto_bin = None\n\t\t\tmin_bin_dist = 1e10\n\t\t\tmin_item = None\n\t\t\tfor check_bin in pick_bins:\n\t\t\t\tif check_bin.item not in pick_items:\n\t\t\t\t\tpick_bins.remove(check_bin)\n\t\t\t\t\tcheck_bin = None\n\t\t\t\t\tcontinue # already picked this item\n\t\t\t\telse:\n\t\t\t\t\td = self.bin_to_bin_distance(*sorted((from_bin, check_bin)))\n\t\t\t\t\tif d < min_bin_dist:\n\t\t\t\t\t\tmin_item = check_bin.item\n\t\t\t\t\t\tmin_bin_dist = d \n\t\t\t\t\t\tto_bin = check_bin\n\t\t\tif to_bin is not None:\n\t\t\t\tself.__route_distance += min_bin_dist\n\t\t\t\tself.__route[(from_bin.location, to_bin.location)] = min_bin_dist\n\t\t\t\tif min_item is not 
None:\n\t\t\t\t\tpick_items.remove(to_bin.item)\n\t\t\t\tpick_bins.remove(to_bin)\n\t\t\t\tfrom_bin = to_bin\n\t\t\n\t\t# last step is to dock\n\t\tfrom_bin = to_bin\n\t\tto_bin = self.wh.dock\n\t\tself.__route_distance += self.bin_to_bin_distance(*sorted((from_bin, to_bin)))\n\t\tself.__route[(from_bin.location, to_bin.location)] = self.__route_distance\n\t\treturn None\n\t\t\n\t\t \n\tdef loc_to_loc_distance(self, from_loc, to_loc):\n\t\t'''Manhattan distance, not Euclidian\n\t\tcall with two Bin class instances\n'''\n\t\tassert isinstance(from_loc, Bin.Bin_Location) and isinstance(to_loc, Bin.Bin_Location),\\\n\t\t\t\t'both arguments must be instance of Bin class'\n\t\tfrom_bin = Bin.get_bin_by_location(from_loc)\n\t\tto_bin = Bin.get_bin_by_location(to_loc)\n\t\treturn bin_to_bin_distance(*sorted((from_bin, to_bin)) )\n\n\t\t\n\tdef __repr__(self):\n\t\trtn = 'PickRoute: Order: {},\\nproute, distance:\\n{}'.format(self.order.__repr__(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t '\\n'.join(['\\tfrom.to: {} dist: {}'.format(ft, d)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor ft, d in self.__route.items()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\t\treturn rtn\n\t\n\t\n\t@property\n\tdef wh(self):\n\t\treturn self.warehouse\n\t\n\[email protected]\n\tdef wh(self, args):\n\t\traise RuntimeError('warehouse is set by init')\n\t\n\t\n\t@property\n\tdef order_lines(self):\n\t\treturn self.__order_lines\n\t\n\t@order_lines.setter\n\tdef order_lines(self, args):\n\t\traise RuntimeError('order_lines is set by init')\n\t\n\t\n\t@property\n\tdef order(self):\n\t\treturn self.__order\n\t\n\[email protected]\n\tdef order(self, args):\n\t\traise RuntimeError('order attribute set by init')\n\t\n\t\n\t@property\n\tdef pick_bins(self):\n\t\treturn self.__pick_bins\n\n\t@pick_bins.setter\n\tdef pick_bins(self):\n\t\traise RuntimeError('PickRoute pick_bins attribute set by init')\n\t\n\n\t@property\n\tdef route(self):\n\t\tif self.__route is 
None:\n\t\t\tself.__route = PickRoute.__calc_route(self.order)\n\t\treturn [s for s in self.__route]\n\n\[email protected]\n\tdef route(self):\n\t\traise RuntimeError('PickRoute route attribute set by init')\n\t\n\t\n\t@property\n\tdef route_distance(self):\n\t\treturn self.__route_distance\n\t\n\t@route_distance.setter\n\tdef route_distance(self, args):\n\t\traise RuntimeError('route_distance attribute set by init')\n\t\n" }, { "alpha_fraction": 0.6212903261184692, "alphanum_fraction": 0.6232258081436157, "avg_line_length": 26.589284896850586, "blob_id": "603e715dc519599bbcc25a05982fc27949605303", "content_id": "9b6c0793d9e11983aa0e269ed650dc038276553d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1550, "license_type": "permissive", "max_line_length": 86, "num_lines": 56, "path": "/Warehouse/circular_queue.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from collections import deque\nfrom numpy.random import shuffle, randint, seed\n\n\nclass CircularQueue:\n '''A class created with an iterable of objects that are to be accessed\nsequentially and, when the last object is returned, the next access returns \nthe first item. 
This process continues without end.\n'''\n\n __name__ = 'CircularQueue'\n\n def __init__(self, items):\n assert '__iter__' in items.__dir__(), 'items argument must be iterable'\n self.__myitems = []\n self.__items = self.prepItems(items)\n \n def prepItems(self, items):\n return [o for o in items]\n\n\n @property\n def item(self):\n if self.itemsLen() == 0:\n self.__myitems = deque(self.__items)\n return self.__myitems.popleft()\n \n def itemsLen(self):\n return len(self.__myitems)\n \nclass RandomizedCircularQueue(CircularQueue):\n '''A subclass of CircularQueue that randomizes order of items before queuing them.\nEach instance receives a sequential integer used as a seed for np.random to support \nunit testing.\n'''\n __seed = 1\n __name__ = 'RandomizedCircularQueue'\n \n def __init__(self, items):\n self.__seed = RandomizedCircularQueue.__seed\n RandomizedCircularQueue.__seed += 1\n super().__init__(items)\n pass # for debugging\n\n\n # override parent method\n def prepItems(self, items):\n seed(self.seed)\n rtn = [o for o in items]\n shuffle(rtn)\n return rtn\n \n\n @property\n def seed(self):\n return self.__seed\n \n" }, { "alpha_fraction": 0.6302195191383362, "alphanum_fraction": 0.6366767287254333, "avg_line_length": 26.270587921142578, "blob_id": "f95fed861cdf45be4ecea67ef033d39b1e1798b5", "content_id": "fd453fa31f5d151651d8bc8d36bb42b2a7f5538a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2323, "license_type": "permissive", "max_line_length": 79, "num_lines": 85, "path": "/Warehouse/Rack.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\n\nclass Rack:\n\t'''\n\tInstantiation:\n\t\tRack(rack_no = int > 0, bin_count = int > 0)\n\t\t\n\tMethod Notes:\n\t\tbins_a and bins_b are dicts of bins and accessed by bin_no as key\n'''\n\n\t_inventory = Inventory()\n\n\n\tdef __init__(self, 
rack_no, bin_count):\n\t\tassert isinstance(rack_no, int) and rack_no > 0,\\\n\t\t\t'rack_no must be int > 0'\n\t\tassert isinstance(bin_count, int) and bin_count > 0,\\\n\t\t\t'bin_count must be int > 0'\n\t\tself.__rack_no = rack_no\n\t\tself.__bin_count = bin_count\n\t\tself.__top_cap_lat = bin_count + 2 # always with fixed # of bins\n\t\tself.__bottom_cap_lat = 1 # always\n\t\tself.__bins_a = dict() # access via bin_no\n\t\tself.__bins_b = dict() # access via bin_no\n\t\t\n\t\tfor bin_no in range(1, bin_count + 1, 1):\n\t\t\tb = Bin(rack_no=self.rack_no, side='a', bin_no=bin_no)\n\t\t\tb.nearest_cap = self.nearest_cap(b)\n\t\t\tb.nearest_cap_distance = int(abs(b.lat_long - b.nearest_cap.lat_long).sum())\n\t\t\tself.__bins_a[bin_no] = b\n\t\t\t\t\t\t\n\t\t\tb = Bin(rack_no=self.rack_no, side='b', bin_no=bin_no)\n\t\t\tb.nearest_cap = self.nearest_cap(b)\n\t\t\tb.nearest_cap_distance = int(abs(b.lat_long - b.nearest_cap.lat_long).sum())\n\t\t\tself.__bins_b[bin_no] = b\n\t\t\t\n\t\t\t\n\tdef __repr__(self):\n\t\t\treturn 'rack_no {}, bin_count: {}, top_cap_lat: {:2d}, bottom_cap_lat: {}'\\\n\t\t\t\t\t.format(self.__rack_no, self.__bin_count, \\\n\t\t\t\t\t\t\tself.__top_cap_lat,\tself.__bottom_cap_lat)\n\n\n\tdef nearest_cap(self, b):\n\t\t(dist_bottom, dist_top) = (b.bin_no, self.bin_count - b.bin_no + 1)\n\t\tif dist_bottom > dist_top:\n\t\t\tbin_no = self.bin_count + 1\n\t\telse:\n\t\t\tbin_no = 0\n\t\treturn Bin(rack_no=self.rack_no, side=b.bin_side, bin_no=bin_no)\n\n\t\n\t@property\n\tdef rack_no(self):\n\t\treturn self.__rack_no\n\t\n\t@rack_no.setter\n\tdef rack_no(self, *arg, **varg):\n\t\traise RuntimeError('rack_no is {} and can not be reset'.format(self.rack_no))\n\n\t@property\n\tdef bins_a(self):\n\t\treturn self.__bins_a\n\t\n\t@bins_a.setter\n\tdef bins_a(self, *arg):\n\t\traise RuntimeError('setting bin arrays by init')\n\n\t@property\n\tdef bins_b(self):\n\t\treturn self.__bins_b\n\t\n\t@bins_b.setter\n\tdef bins_b(self, *arg):\n\t\traise 
RuntimeError('setting bin arrays by init')\n\n\t@property\n\tdef bin_count(self):\n\t\treturn self.__bin_count\n\t\n\t@bin_count.setter\n\tdef bin_count(self, *argv):\n\t\traise RuntimeError('bin_count only gets set when adding bin(s)')\n\t\t\n\t\n" }, { "alpha_fraction": 0.5591866374015808, "alphanum_fraction": 0.5715323090553284, "avg_line_length": 26, "blob_id": "5307f73b503660d3cdc1f923c84566e8a971e80e", "content_id": "6d13d90d3c619313f077ebb0c3c8e02dc4e03c8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1377, "license_type": "permissive", "max_line_length": 84, "num_lines": 51, "path": "/test/test_circular_queue.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on Jun 8, 2019\n\n@author: bren\n'''\nimport unittest\nfrom Warehouse.circular_queue import CircularQueue, RandomizedCircularQueue\nfrom numpy.random import seed, randint, shuffle\n\n\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def testCircularQueue(self):\n cq = CircularQueue(range(5))\n for i in range(5):\n cqi = cq.item\n self.assertEqual(cqi, i, 'queue should return {}, is {}'.format(i, cqi))\n \n def testRandomizedCircularQueue(self):\n cq = RandomizedCircularQueue(range(5))\n self.assertEqual(cq.seed, 1, 'seed SBE 1, is {}'.format(cq.seed))\n r = [o for o in range(5)]\n seed(cq.seed)\n shuffle(r)\n cqs = [cq.item for _ in range(5)]\n for i, s in zip(r, cqs):\n self.assertEqual(i, s, 'cq.item should return {}, is {}'.format(i, s))\n\n cq = RandomizedCircularQueue(range(5))\n self.assertEqual(cq.seed, 2, 'seed SBE 2, is {}'.format(cq.seed))\n r = [o for o in range(5)]\n seed(cq.seed)\n shuffle(r)\n cqs = [cq.item for _ in range(5)]\n for i, s in zip(r, cqs):\n self.assertEqual(i, s, 'cq.item should return {}, is {}'.format(i, s))\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testCircularQueue']\n 
unittest.main()\n" }, { "alpha_fraction": 0.6413095593452454, "alphanum_fraction": 0.6473278999328613, "avg_line_length": 22.072221755981445, "blob_id": "61f4962b7ad3d137ceef20ff339c118fca8e63a7", "content_id": "d3ddab18462ee0cf803bc66ac24ef9b1cd977676", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4154, "license_type": "permissive", "max_line_length": 114, "num_lines": 180, "path": "/Warehouse/Bin.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "from _collections import defaultdict\nfrom functools import total_ordering\nimport numpy\n\nclass Bin:\n\t\n\tbin_locations = defaultdict(lambda: None) # Bin.__bin_location[location] = Bin\n\t\n\t\n\t@classmethod\n\tdef clear(cls):\n\t\tcls.bin_locations = defaultdict(lambda: None) # Bin.__bin_location[location] = Bin\n\t\t\n\t\t\n\t@classmethod\n\tdef get_bin_by_location(cls, location):\n\t\tassert isinstance(location, Bin.Bin_Location), 'get_bin_by_location to be called with Bin.Bin_Location instance'\n\t\treturn Bin.bin_locations[location]\n\t\n\t\n\t@classmethod\n\tdef drop_bin(cls, location):\n\t\tassert isinstance(location, Bin.Bin_Location), 'drop_bin to be called with Bin.Bin_Location instance'\n\t\ttry:\n\t\t\tdel Bin.bin_locations[location]\n\t\texcept:\n\t\t\tpass\n\t\n\n\t# start internal class Bin_location\n\t@total_ordering\n\tclass Bin_Location:\n\t\t\n\t\t__name__ = 'Bin_Llocation'\n\t\t\n\t\tdef __init__(self, rack_no, side, bin_no):\n\t\t\tself.__rack = rack_no\n\t\t\tself.__side = side\n\t\t\tself.__bin_no = bin_no\n\t\t\tself.__tuple = (rack_no, side, bin_no)\n\t\t\t\n\t\t\n\t\tdef __lt__(self, other):\n\t\t\treturn self.astuple < other.astuple\n\t\t\n\t\t\n\t\tdef __eq__(self, other):\n\t\t\treturn self.astuple == other.astuple\n\t\t\n\t\t\n\t\tdef __hash__(self):\n\t\t\treturn hash('{:02d}{:1s}{:02d}'.format(self.rack, self.side, self.bin_no))\n\t\t\n\t\t\n\t\tdef __repr__(self):\n\t\t\treturn 
'rack: {}, side: {}, bin_no: {}'.format(self.rack, self.side, self.bin_no)\n\n\n\t\t@property\n\t\tdef astuple(self):\n\t\t\treturn self.__tuple\n\t\t\n\t\t@property\n\t\tdef rack(self):\n\t\t\treturn self.__rack\n\t\t\n\t\t@property\n\t\tdef side(self):\n\t\t\treturn self.__side\n\t\t\n\t\t@property\n\t\tdef bin_no(self):\n\t\t\treturn self.__bin_no\n\n\t\t# end of class Bin_Location\n\t\t\n\n\t# start methods for class Bin\n\tdef __init__(self, rack_no, side, bin_no,\n\t\t\t\t item=None, count=0):\n\n\t\tself.__rack_no = rack_no\n\t\tself.__bin_side = side # a or b\n\t\tself.__bin_no = bin_no\n\t\tself.__location = Bin.Bin_Location(rack_no, side, bin_no)\n\t\tlat = bin_no + 1 if bin_no > 0 else 0 # dock is bin_no 0 at lat 0\n\t\tlong = (rack_no * 3) - 2 if side == 'a' else (rack_no * 3) - 1\n\t\tself.__lat_long = numpy.array([lat, long])\n\t\tself.__item = item\n\t\tself.__count = count\n\t\tself.__nearest_cap = None\n\t\tself.__nearest_cap_distance = None\n\n\t\tBin.bin_locations[self.location] = self\n\n\tdef stock_bin(self, item_no, item_count):\n\t\t'''\n\t\tadd item_count of items in bin\n\t\tif item_no is != bin.item, replace bin.item, bin.count with item_no, item_count (not added to)\n\t\tSBE called from Inventory class where this bin instance lives\n'''\n\t\tassert isinstance(item_no, int) and item_no > 0, 'item_no must be int > 0'\n\t\tassert isinstance(item_count, int), 'item_count must be int, can be +/-'\n\t\tself.__count = item_count if self.item != item_no else self.__count + item_count\n\t\tif self.__count < 0:\n\t\t\tself.__count = 0\n\t\tself.__item = item_no\n\n\n\tdef __lt__(self, other):\n\t\treturn self.location < other.location\n\t\n\t\n\tdef __repr__(self):\n\t\trtn = 'rack_no: {}, bin_no: {}, bin_side: {} '\\\n\t\t\t\t.format(self.__rack_no, self.__bin_no, self.__bin_side)\n\t\trtn += 'lat: {}, long: {}, item: {}, count: {} '\\\n\t\t\t\t .format(self.__lat_long[0], self.__lat_long[1],\n\t\t\t\t\t\t self.__item, self.__count)\n\n\t\treturn 
rtn\n\n\t\n\t@property\n\tdef nearest_cap(self):\n\t\treturn self.__nearest_cap\n\t\n\t@nearest_cap.setter\n\tdef nearest_cap(self, cap):\n\t\tassert isinstance(cap, Bin), 'nearest_cap must be class Bin'\n\t\tself.__nearest_cap = cap\n\t\n\t@property\n\tdef nearest_cap_distance(self):\n\t\treturn self.__nearest_cap_distance\n\t\n\t@nearest_cap_distance.setter \n\tdef nearest_cap_distance(self, dist):\n\t\tassert isinstance(dist, int) and dist >= 0, 'distance must be int > 0'\n\t\tself.__nearest_cap_distance = int(dist)\n\t\n\t@property\n\tdef rack_no(self):\n\t\treturn self.__rack_no\n\n\t@property\n\tdef bin_no(self):\n\t\treturn self.__bin_no\n\n\t@property\n\tdef bin_side(self):\n\t\treturn self.__bin_side\n\n\t@property\n\tdef bins(self):\n\t\treturn Bin.bin_locations.values()\n\n\t@property\n\tdef location(self):\n\t\treturn self.__location\n\n\t@property\n\tdef lat_long(self):\n\t\treturn self.__lat_long\n\n\t@property\n\tdef lat(self):\n\t\treturn self.__lat_long[0]\n\n\t@property\n\tdef long(self):\n\t\treturn self.__lat_long[1]\n\n\t@property\n\tdef item(self):\n\t\treturn self.__item\n\n\t@property\n\tdef count(self):\n\t\treturn self.__count\n\n" }, { "alpha_fraction": 0.47122615575790405, "alphanum_fraction": 0.49326568841934204, "avg_line_length": 37.37583923339844, "blob_id": "24a9ecda5ebbc16cdc6efc08ed3643141d47789c", "content_id": "3218fb5a7cfa30219460b101ad9764e057626ec5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5717, "license_type": "permissive", "max_line_length": 116, "num_lines": 149, "path": "/test/test_pickroute.py", "repo_name": "upandacross/warehouse_optimization", "src_encoding": "UTF-8", "text": "'''\nCreated on May 28, 2019\n\n@author: bren\n'''\nfrom itertools import product\nimport numpy as np\nimport os\nimport unittest\nfrom Warehouse.Bin import Bin\nfrom Warehouse.Inventory import Inventory\nfrom Warehouse.Order import Order\nfrom Warehouse.PickRoute import 
PickRoute\nfrom Warehouse.Warehouse import Warehouse\nfrom Box2D.Box2D import b2AssertException\n\nclass Test(unittest.TestCase):\n Warehouse.clear()\n wh = Warehouse(5, 5)\n rack_count, bin_count = wh.racks_bins\n bins_total = rack_count * bin_count * 2\n\n inv = Inventory()\n num_items = bins_total\n o = None\n order_lines = 5\n setup_calls = 0\n\n \n def setUp(self):\n Test.setup_calls += 1\n # print('setup call number {}'.format(Test.setup_calls))\n \n np.random.seed(42)\n \n Test.wh = Test.wh.reset(5, 5)\n self.wh = Test.wh\n Test.inv.clear()\n rsb = [(r, s, b) for r, s, b in product(range(1, 5 + 1, 1),\n list('ab'),\n range(1, 5 + 1, 1))]\n np.random.seed(42)\n np.random.shuffle(rsb)\n for i, (r, s, bn) in enumerate(rsb):\n if s == 'a':\n b = self.wh.racks[r - 1].bins_a[bn]\n else: # s == 'b'\n b = self.wh.racks[r - 1].bins_b[bn]\n self.wh.update_stock(i + 1, (i + 1) * 10, b.location)\n\n\n np.random.seed(42)\n Test.o = Order()\n np.random.seed(42)\n for i, q in zip(np.random.choice(range(1, Test.num_items + 1), size=Test.order_lines, replace=False),\n np.random.randint(1, Test.num_items + 1, size=Test.order_lines)):\n Test.o.add_line(item_no=int(i), qty=int(q))\n\n racks = self.wh.racks\n b = racks[0].bins_a[1]\n self.assertEqual(b.item, 26, 'rack 1, bin @ {}, item SBE 26, is {}'.format(b.location, b.item))\n # print(b)\n b = racks[1].bins_a[1]\n self.assertEqual(b.item, 41, 'rack 1, bin @ {}, item SBE 41, is {}'.format(b.location, b.item))\n # print(b)\n b = racks[1].bins_b[1]\n self.assertEqual(b.item, 20, 'rack 1, bin @ {}, item SBE 29, is {}'.format(b.location, b.item))\n # print(b)\n \n\n\n def tearDown(self):\n pass\n\n\n def testInit(self):\n # print('testInit')\n pr = PickRoute(Test.wh, self.o)\n self.assertEqual(len(pr.wh.racks), 5, 'warehouse should have 5 racks, is {}'.format(len(pr.wh.racks)))\n self.assertEqual(len(self.o.lines), Test.order_lines, 'order lines SBE {}, is {}'.format(Test.order_lines,\n len(self.o.lines)))\n 
self.assertEqual(len(pr.pick_bins), Test.order_lines, \\\n 'order pick_bins SBE {}, is {}'.format(Test.order_lines,\n len(pr.pick_bins)))\n \n \n def testBinsDistance(self):\n # print('testBinsDistance')\n b1 = Bin(rack_no=1, side='b', bin_no=1)\n b2 = Bin(rack_no=1, side='a', bin_no=5)\n pr = PickRoute(Test.wh, self.o)\n d = pr.bin_to_bin_distance(b1, b2)\n print('from {} to {} dist: {}'.format(b1.location, b2.location, d))\n #expected = 16\n #self.assertEqual(d, expected, '{} to {} SBE [], is {}'\\\n # .format(b1.location, b2.location, expected, d))\n\n \n b1 = Bin(rack_no=1, side='b', bin_no=1)\n b2 = Bin(rack_no=2, side='a', bin_no=1)\n pr = PickRoute(Test.wh, self.o)\n d = pr.bin_to_bin_distance(b1, b2)\n print('from {} to {} dist: {}'.format(b1.location, b2.location, d))\n #expected = 16\n #self.assertEqual(d, expected, '{} to {} SBE [], is {}'\\\n # .format(b1.location, b2.location, expected, d))\n\n \n b1 = Bin(rack_no=1, side='a', bin_no=4)\n b2 = Bin(rack_no=4, side='b', bin_no=5)\n pr = PickRoute(Test.wh, self.o)\n d = pr.bin_to_bin_distance(b1, b2)\n print('from {} to {} dist: {}'.format(b1.location, b2.location, d))\n #expected = 16\n #self.assertEqual(d, expected, '{} to {} SBE [], is {}'\\\n # .format(b1.location, b2.location, expected, d))\n\n \n b1 = Bin(rack_no=3, side='a', bin_no=0)\n b2 = Bin(rack_no=4, side='b', bin_no=5)\n pr = PickRoute(Test.wh, self.o)\n d = pr.bin_to_bin_distance(b1, b2)\n print('from dock {} to {} dist: {}'.format(b1.location, b2.location, d))\n #expected = 16\n #self.assertEqual(d, expected, '{} to {} SBE [], is {}'\\\n # .format(b1.location, b2.location, expected, d))\n\n \n def testRoute(self):\n # print('testRoute')\n np.random.seed(42)\n pr = PickRoute(Test.wh, self.o)\n expected_steps = 46\n if pr.route_distance != expected_steps:\n print('route distance {:d} SBE {} {}'.format(pr.route_distance,\n expected_steps,\n 'maybe next time?' 
\\\n if pr.route_distance != expected_steps \\\n else ''))\n # print(os.path.curdir)\n # I'm missing a random.seed somewhere\n self.assertEqual(pr.route_distance, expected_steps,\n 'route_distince SBE {}, is {}'\\\n .format(expected_steps, pr.route_distance))\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" } ]
19
sashatran/cal.ly
https://github.com/sashatran/cal.ly
2adc8a1700669bfb3ef2ef0ede5fd6a565dabea7
b12b8db1456c0ae2f84bc9c25a9e2ed2584dc89a
5056466204dcbccbe0275bb84a77d8fbf334cb72
refs/heads/master
2017-12-13T16:31:44.468456
2017-01-18T02:28:56
2017-01-18T02:28:56
78,305,938
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5770425200462341, "alphanum_fraction": 0.5816340446472168, "avg_line_length": 29.348360061645508, "blob_id": "4fc76a00e9a5d61b872e817e5b0f35d675831f18", "content_id": "8507144033c57717c24b97497b051e0678921f80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7405, "license_type": "no_license", "max_line_length": 203, "num_lines": 244, "path": "/main.py", "repo_name": "sashatran/cal.ly", "src_encoding": "UTF-8", "text": "import os\nimport webapp2\nimport jinja2\nimport random\nimport json\nimport random\nimport string\nimport datetime\nimport time\n\nfrom google.appengine.api import mail\nfrom google.appengine.ext import db\n\nimport pickle\n\nclass PickleProperty(db.Property):\n def get_value_for_datastore(self, model_instance):\n value = getattr(model_instance, self.name, None)\n return pickle.dumps(value)\n\n def make_value_from_datastore(self, value):\n return pickle.loads(value)\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)\njinja_env.filters['datetime'] = datetime\n\n#database\nclass Schedule(db.Model):\n first_email = db.StringProperty()\n second_email = db.StringProperty()\n scheID = db.StringProperty()\n slot = PickleProperty()\n created = db.DateTimeProperty(auto_now_add = True)\n\nclass BaseHandler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\nclass MainPage(BaseHandler):\n def get(self):\n self.render(\"index.html\")\n\n def post(self):\n\n data = json.loads(self.request.body)\n print \"HELLLLOOOO\" + data\n\n second_email = data[0]\n google_cal = data[1]\n meeting_id = data[2]\n time = data[3]\n\n start = 
int(time[0])\n end = int(time[1])\n\n start = datetime.datetime.fromtimestamp(start)\n end = datetime.datetime.fromtimestamp(end)\n\n findFirstEmail = db.GqlQuery(\"SELECT * FROM Schedule WHERE scheID = :1\", meeting_id)\n\n for key in findFirstEmail:\n first_email = key.email\n\n message = mail.EmailMessage(sender=\"[email protected]\", subject=\"cal.ly: Meeting confirmed!\")\n message.to = [second_email, first_email]\n message.body = \"Congratulations. The meeting is confirmed! Click this link to add to your Google Calendar:\" + google_cal + \"With the time as following\" + \"Start:\" + str(start) + \"End:\" + str(end)\n\n\n # message.html = \"\"\"\n # <html><head></head><body>\n # Meeting confirmed!\n\n # Please click the link above to add the meeting to your Google Calendar. \n\n # Thank you for using cal.ly! \n\n\n # cal.ly team\n # </body></html>\n # \"\"\"\n\n # message.html = open('templates/emailHTML.html').read()\n message.send()\n\n self.response.write(\"succeed\")\n\nclass Confirm(BaseHandler):\n def get(self):\n self.render(\"confirm.html\")\n\n def post(self):\n # pop email from front of the quue\n j = json.loads(self.request.body)\n first_email = j.pop(0)\n second_email = j.pop(0)\n\n time_slot = []\n\n # generate random hash key\n def randomize():\n char = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(5)])\n return char\n\n for interval in j:\n start = int(interval['start'])\n duration = interval['duration']\n end = int(interval['end'])\n current_start = start\n current_end = current_start + duration\n\n while current_end <= end:\n time_slot.append([current_start, current_end])\n current_start = current_end\n current_end = current_start + duration\n\n #Put into database \n entities = [] \n\n char = randomize()\n db_record = Schedule()\n db_record.first_email = first_email\n db_record.second_email = second_email\n db_record.scheID = char\n db_record.slot = time_slot\n\n entities.append(db_record)\n db.put(entities)\n\n 
self.response.write(json.dumps(db_record.scheID))\n\n\n# For User 2: List out timeslot that was picked by user 1\nclass ListTimeSlot(BaseHandler):\n def get(self, hash_id):\n meeting_id = hash_id\n\n picked_time_slot = db.GqlQuery(\"SELECT * FROM Schedule WHERE scheID = :1\", hash_id)\n \n time_arr = {}\n j = 0\n\n for keys in picked_time_slot:\n first_user_email = keys.email\n\n for item in picked_time_slot:\n for i in item.slot:\n begin = datetime.datetime.fromtimestamp(i[0])\n finish = datetime.datetime.fromtimestamp(i[1])\n\n print begin.strftime('%d %b %Y, %R%p'), finish.strftime('%d %b %Y, %R%p')\n\n begin = str(begin.strftime('%d %b %Y, %R%p'))\n\n finish = str(finish.strftime('%d %b %Y, %R%p'))\n\n j += 1\n\n time_arr[j] = begin, finish, i[0], i[1]\n\n self.render(\"list.html\", time_arr=time_arr, picked_time_slot=picked_time_slot, first_user_email=first_user_email, meeting_id=meeting_id)\n\nclass Slot(BaseHandler):\n def get(self):\n self.render('slot.html')\n\nclass Picked(BaseHandler):\n\n def post(self):\n # pop email from front of the quue\n j = json.loads(self.request.body)\n print j\n first_email = j.pop(0)\n second_email = j.pop(0)\n\n time_slot = []\n\n # generate random hash key\n def randomize():\n char = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(5)])\n return char\n\n for interval in j:\n start = int(interval['start'])\n duration = interval['duration']\n end = int(interval['end'])\n current_start = start\n current_end = current_start + duration\n\n while current_end <= end:\n time_slot.append([current_start, current_end])\n current_start = current_end\n current_end = current_start + duration\n\n #Put into database \n entities = [] \n\n char = randomize()\n db_record = Schedule()\n db_record.first_email = first_email\n db_record.second_email = second_email\n db_record.scheID = char\n db_record.slot = time_slot\n\n entities.append(db_record)\n db.put(entities)\n\n 
self.response.write(json.dumps(db_record.scheID))\n\n meeting_id = db_record.scheID\n \n meeting_link = \"http://cal-ly.appspot.com/list/\" + db_record.scheID\n\n # time = data[2]\n\n # start = int(time[0])\n # end = int(time[1])\n\n # start = datetime.datetime.fromtimestamp(start)\n # end = datetime.datetime.fromtimestamp(end)\n\n findFirstEmail = db.GqlQuery(\"SELECT * FROM Schedule WHERE scheID = :1\", meeting_id)\n\n for key in findFirstEmail:\n first_email = key.first_email\n\n\n message = mail.EmailMessage(sender=\"[email protected]\", subject=\"cal.ly: Meeting confirmed!\")\n message.to = [second_email, first_email]\n message.body = \"Hey there, \" + first_email + \" has sent you a meeting invite.\" + \" Click on the link below to select a time slot \" + meeting_link\n\n message.send()\n\n # self.response.write(\"succeed\")\n\n\napp = webapp2.WSGIApplication([('/', MainPage),('/slot', Slot), ('/confirm', Confirm), ('/picked', Picked), ('/list/([a-zA-Z0-9]+)',ListTimeSlot)], debug=True)\n" }, { "alpha_fraction": 0.7151898741722107, "alphanum_fraction": 0.7151898741722107, "avg_line_length": 30.700000762939453, "blob_id": "5fbd08437e79d35797dbc725fb0028d2d4e05d89", "content_id": "1bbe94e860715fc73408dd43c9ce4c8b4e1b4796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/templates/common.py", "repo_name": "sashatran/cal.ly", "src_encoding": "UTF-8", "text": "from google.appengine.ext import db\nimport pickle\n\nclass PickleProperty(db.Property):\n def get_value_for_datastore(self, model_instance):\n value = getattr(model_instance, self.name, None)\n return pickle.dumps(value)\n\n def make_value_from_datastore(self, value):\n return pickle.loads(value)" }, { "alpha_fraction": 0.7410404682159424, "alphanum_fraction": 0.744508683681488, "avg_line_length": 40.14285659790039, "blob_id": 
"9ac7bb8ed998af8b433763da158e1a8e25218b46", "content_id": "06eaf3ec0dd0777784e06729a56adb1e8b3770eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/handle_bounced_email.py", "repo_name": "sashatran/cal.ly", "src_encoding": "UTF-8", "text": "import logging\nimport webapp2\nfrom google.appengine.api import mail\nfrom google.appengine.ext.webapp.mail_handlers import BounceNotification\nfrom google.appengine.ext.webapp.mail_handlers import BounceNotificationHandler\n\nclass LogBounceHandler(BounceNotificationHandler):\n def receive(self, bounce_message):\n logging.info('Received bounce post ... [%s]', self.request)\n logging.info('Bounce original: %s', bounce_message.original)\n logging.info('Bounce notification: %s', bounce_message.notification)\n\nclass BounceHandler(webapp2.RequestHandler):\n def post(self):\n bounce = BounceNotification(self.request.POST)\n logging.info('Bounce original: %s', bounce.original)\n logging.info('Bounce notification: %s', bounce.notification)\n\napp = webapp2.WSGIApplication([\n ('/_ah/bounce', LogBounceHandler),\n], debug=True)\n\n" } ]
3
MaxxDelusional/SpiderPi
https://github.com/MaxxDelusional/SpiderPi
a3c05e11f4bffc5a419f7c2f4081f4d78ff4e10b
aedc5d714faf2e79d949347d028e8edce6b17df8
3bb7a1909c91e4077baf538bb1b52b6086e71311
refs/heads/master
2016-08-12T17:13:40.819208
2015-11-07T19:56:22
2015-11-07T19:56:22
45,716,431
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5573394298553467, "alphanum_fraction": 0.5802752375602722, "avg_line_length": 26.25, "blob_id": "1b2b250e37f4787e613f2a45a46150097e6990f8", "content_id": "2df7b72c24b7a3238407dab91963c195dbb5db08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "permissive", "max_line_length": 70, "num_lines": 16, "path": "/dave.py", "repo_name": "MaxxDelusional/SpiderPi", "src_encoding": "UTF-8", "text": "import pygame\n\npygame.init()\n\n# to spam the pygame.KEYDOWN event every 100ms while key being pressed\npygame.key.set_repeat(100, 100)\n\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n print 'go forward'\n if event.key == pygame.K_s:\n print 'go backward'\n if event.type == pygame.KEYUP:\n print 'stop'\n" }, { "alpha_fraction": 0.5515239238739014, "alphanum_fraction": 0.55878084897995, "avg_line_length": 18.408451080322266, "blob_id": "e65a9d691c054e1e533bf3dd6b66edb4863e2127", "content_id": "757f2e895f44c1f4325f446bc5e35788a34f1fc0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1378, "license_type": "permissive", "max_line_length": 66, "num_lines": 71, "path": "/main.py", "repo_name": "MaxxDelusional/SpiderPi", "src_encoding": "UTF-8", "text": "import sys,tty,termios,time\nimport RPi.GPIO as GPIO\n\nLEFT_PIN = 26\nRIGHT_PIN = 16\nSTATUS_PIN = 13\n\nclass _Getch:\n def __call__(self):\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef forward():\n GPIO.output(LEFT_PIN, True)\n time.sleep(0.1)\n GPIO.output(RIGHT_PIN, True)\n\ndef left():\n GPIO.output(LEFT_PIN, True)\n GPIO.output(RIGHT_PIN, False)\n\ndef right():\n GPIO.output(LEFT_PIN, False)\n GPIO.output(RIGHT_PIN, 
True)\n\ndef stop():\n GPIO.output((LEFT_PIN, RIGHT_PIN), False)\n\ndef get():\n inkey = _Getch()\n while(1):\n k=inkey()\n if k!='':break\n\n if k == 'w':\n forward()\n elif k == 'a':\n left()\n elif k == 'd':\n right()\n elif k == 's':\n stop()\n elif k == 'p':\n return False\n\n return True\n\ndef main():\n print \"Hello Spider\"\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(LEFT_PIN, GPIO.OUT)\n GPIO.setup(RIGHT_PIN, GPIO.OUT)\n GPIO.setup(STATUS_PIN, GPIO.OUT)\n\n GPIO.output(STATUS_PIN, True)\n while get():\n print \"Still Running\"\n\n print \"Exiting\"\n GPIO.output(STATUS_PIN, False)\n GPIO.cleanup()\n\nif __name__=='__main__':\n main()\n" } ]
2
brialparker/HT_processing
https://github.com/brialparker/HT_processing
766f76dcf1784e3fd3a4ddf87091fe7cc71908db
a69828d778cb33918cd294f82c0191c9a49aaeae
3895f1c1b0a8ccd72558b9d6a06410069399246f
refs/heads/master
2021-01-10T14:20:49.709170
2015-11-05T16:21:30
2015-11-05T16:21:30
45,622,643
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5285714268684387, "alphanum_fraction": 0.5285714268684387, "avg_line_length": 22.66666603088379, "blob_id": "79b5bb64f099d7b8cc9ab88afbe5d2a5780493da", "content_id": "15c4066a79cac5e895a4bfb2d2399d201bfa3229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 70, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/create_zip.sh", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "for dir in $(find . -type d); do\n zip -r -X \"$dir\".zip \"$dir\"\ndone" }, { "alpha_fraction": 0.46739131212234497, "alphanum_fraction": 0.46739131212234497, "avg_line_length": 14.5, "blob_id": "c60461e5e22ef962f7f2d5694e8c4764cfc6546d", "content_id": "bf1ecfd8a6042c340c749e68f3464470a0a7b6fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 92, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/strip_form_feed.sh", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor file in $(find . -name '*.txt')\ndo\n sed -i \"\" 's/\\^L//' \"$file\"\ndone" }, { "alpha_fraction": 0.6059113144874573, "alphanum_fraction": 0.6502463221549988, "avg_line_length": 28.14285659790039, "blob_id": "81cf6abd343ab98295d03dd8ea79544edeba6097", "content_id": "04eb53d3bd72b598afbcf553e3a4e0c9770e4d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 203, "license_type": "no_license", "max_line_length": 101, "num_lines": 7, "path": "/generate_meta_yml.sh", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor file in $(find . 
-name '00000001.tif');\ndo\n parentdir=$(dirname \"$file\")\n exiftool -j -FileModifyDate -XResolution \"$file\" | python json2yaml.py > $parentdir/meta.yml\ndone" }, { "alpha_fraction": 0.7540983557701111, "alphanum_fraction": 0.8155737519264221, "avg_line_length": 60, "blob_id": "bd72988a95722e62c5b00b36d27aeb043b99b08a", "content_id": "fa7a5797b9d2fe1b5008024ee7f4b73389b30602", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 244, "license_type": "no_license", "max_line_length": 120, "num_lines": 4, "path": "/README.md", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "# HT_processing\nA set of scripts put together for processing our vendor scans and local metadata for ingest to HathiTrust\n\nSee [this gist](https://gist.github.com/brialparker/d24860114d335d774a6d) for information on the workflow of the process\n" }, { "alpha_fraction": 0.5051546096801758, "alphanum_fraction": 0.5257731676101685, "avg_line_length": 31.66666603088379, "blob_id": "d5cec4166134b2b70db024a5042db41a53dc49e4", "content_id": "d0519979e6c58d03482524dd9289fd43dc80d4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 97, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/create_checksum.sh", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "for dir in $(find . 
-type d); do\n md5 -r \"$dir\"/* | sed \"s:$dir/::\" > \"$dir/checksum.md5\"\ndone" }, { "alpha_fraction": 0.6552872657775879, "alphanum_fraction": 0.6627810001373291, "avg_line_length": 33.342857360839844, "blob_id": "f24c5ec3a533ad039b66db9103d5ab12f897c6c4", "content_id": "24d33256d4f4f5ba50da5fed00ab12ccb42cc689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 128, "num_lines": 35, "path": "/create_meta.py", "repo_name": "brialparker/HT_processing", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport json, yaml, sys, datetime\nimport dateutil.parser as parser\n\ndata = json.load(sys.stdin)\n\n#split the date apart to replace the non-ISO ':' with a '-' then put it back together again\n\ndef datesplit(CreateDate):\n splitdate = CreateDate.split(\" \")\n splitdate[0]\n splitdate[1]\n splitdate[0]=splitdate[0].replace(\":\",\"-\")\n capture_date=\"T\".join(splitdate)\n return capture_date\n\n#rename the keys according to HT specs, while also providing some of the constant values\n\nfor item in data:\n item.pop(\"SourceFile\")\n# item[\"capture_date\"] = datesplit(item.pop(\"CreateDate\")) CreateDate does not consistently appear in metadata - can't use\n item[\"capture_date\"] = datesplit(item.pop(\"FileModifyDate\"))\n item[\"scanner_make\"] = \"NIKON CORPORATION\"\n item[\"scanner_model\"] = \"NIKON D810\"\n item[\"scanner_user\"] = \"Creekside Digital\"\n item[\"contone_resolution_dpi\"] = item.pop(\"XResolution\")\n item[\"scanning_order\"] = \"right-to-left\"\n item[\"reading_order\"] = \"right-to-left\"\n\n#print data into yaml format\n\nyml = yaml.safe_dump(data[0],default_flow_style=False, stream=None)\n\nprint(yml)" } ]
6
whoislewys/cookbook
https://github.com/whoislewys/cookbook
551b6f7003b9b8763b85929959f411ee1d1575aa
2a1d53f84ce3dd885c4215f0d67b84617ed04e5d
f96a9bb5568a644f9a9f551744f9c299532b2d49
refs/heads/master
2022-11-09T04:53:17.868072
2022-10-25T17:36:58
2022-10-25T17:36:58
207,707,733
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5649038553237915, "alphanum_fraction": 0.6081730723381042, "avg_line_length": 26.799999237060547, "blob_id": "eba96f61d77b97f6a9a58605878d9a962542efc4", "content_id": "82e820b5de55cb24aabeabeec9bc758952e505a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/py/missing_item_list_diff.py", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "def find_missing_with_set_in(shortList, bigList):\n set1 = set(shortList)\n for x in list2:\n if x not in set1:\n return x\n\ndef find_missing_meme_solution(lilMeme, bigMeme):\n return sum(bigMeme) - sum(lilMeme)\n\nif __name__ == '__main__':\n list1 = [1, 2, 3, 4]\n list2 = [1, 2, 3, 4, 5]\n\n print(find_missing_with_set_in(list1, list2))\n print(find_missing_meme_solution(list1, list2))" }, { "alpha_fraction": 0.742553174495697, "alphanum_fraction": 0.7531914710998535, "avg_line_length": 23.6842098236084, "blob_id": "bb57a47645c7f3ef1d57374e41bac6c26685c203", "content_id": "d12bd5c4b8344aa8ec302f4a2fa5e0a77898d19c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 470, "license_type": "no_license", "max_line_length": 87, "num_lines": 19, "path": "/business/user-interview-qs.md", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "GOOD ENRRGY VIBESS YEAHYUHHH\n\nOpen with a compliement about their facility, them personally if you can.\n\nASK THESE Questions about *x*\n\n1. What's the hardest part?\n\n2. Tell me about the last time you experienced that?\n\n3. Why was it hard?\n\n4. What have you tried to fix it?\n\n5. 
What don't you love about that?\n\nIf they're interested, remember to end by asking for an e-mail so I can follow up later\n\nIf they're not interested, ask for a friend that might be interested\n\n" }, { "alpha_fraction": 0.5705128312110901, "alphanum_fraction": 0.6089743375778198, "avg_line_length": 17.352941513061523, "blob_id": "ab7d5533d22e8b5a8dab47920045dcad33b73318", "content_id": "43b821ddb29a06ac69670998d435dca5a104d309", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/py/quickselect.py", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "''' Given an unsorted list, find the kth smallest number in that list '''\n\n\ndef kth_smallest(nums, k):\n return sorted(nums)[k-1]\n\n\ndef quick_select(nums, k):\n # todo: \n pass\n\n\nif __name__ == '__main__':\n list1 = [1, 3, 4, 0, 9]\n print(kth_smallest(list1, 3))\n # solution: 3\n print(quick_select(list1, 3))\n" }, { "alpha_fraction": 0.7559523582458496, "alphanum_fraction": 0.7559523582458496, "avg_line_length": 55, "blob_id": "eae1e55fb026ea774e3668f8e052eddbe6336c61", "content_id": "412184aa0827430a5ebe6e063635aa74fde5af09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 504, "license_type": "no_license", "max_line_length": 107, "num_lines": 9, "path": "/UPDATE_LAST_MODIFIED.gs", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "// Responds to a change in `input` Range by setting current timestamp in the cell calling this function\nfunction UPDATE_LAST_MODIFIED(input) {\n // general docs\n // https://developers.google.com/apps-script/reference/spreadsheet\n var sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet();\n // https://developers.google.com/apps-script/reference/utilities/utilities#formatDate(Date,String,String)\n var time = Utilities.formatDate(new 
Date(), \"GMT\", \"yyyy-MM-dd'T'HH:mm'Z'\");\n return time;\n}\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 30, "blob_id": "06bdca7bbbf3b80df82f6279f5dbe1fb821f1562", "content_id": "1119b305cdcc138e76d89064ef0b700bfce535ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/README.md", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "some bite sized pieces of code\n" }, { "alpha_fraction": 0.47666195034980774, "alphanum_fraction": 0.49646392464637756, "avg_line_length": 24.25, "blob_id": "fc07e2b468fbc92b90153c58d23b556c451d1839", "content_id": "8f793d4e02ae8f05e15892d69731e495cfe2ad52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/py/fib.py", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "class fibonacci():\n def nextFib(self, n):\n if n == 0:\n return 0;\n if n == 1:\n return 1;\n return self.nextFib(n - 1) + self.nextFib(n - 2)\n\n def memoizedNextFib(self, n, cache):\n if n == 0:\n return 0;\n if n == 1:\n return 1;\n\n if n not in cache:\n leftRec = self.memoizedNextFib(n - 1, cache)\n rightRec = self.memoizedNextFib(n - 2, cache)\n cache[n] = leftRec + rightRec\n return leftRec + rightRec\n else:\n return cache[n]\n\n\nif __name__ == '__main__':\n fibonacci = fibonacci()\n\n print(fibonacci.nextFib(4))\n print(fibonacci.memoizedNextFib(4, {}))\n" }, { "alpha_fraction": 0.5798491835594177, "alphanum_fraction": 0.5894448161125183, "avg_line_length": 25.035715103149414, "blob_id": "bf69c82c82c858d755544eaba92293f13bf76c69", "content_id": "43910c2aaeacf12a7a84491cbe91a4a67af7cda8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1459, "license_type": "no_license", "max_line_length": 86, "num_lines": 56, "path": "/py/depth_first.py", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "# from a given given graph \nfrom collections import defaultdict \n \n\nclass Graph: \n # A directed graph using adjacency list representation \n def __init__(self): \n # default dictionary to store graph \n self.graph = defaultdict(list) \n\n \n def addEdge(self, u, v): \n # function to add an edge to graph \n self.graph[u].append(v) \n \n\n def DFSUtil(self, v, visited): \n # Mark current node as visited\n visited[v] = True\n print(v)\n\n # iterate through nodes adjacent to current node\n for adj in self.graph[v]:\n if visited[adj] is False:\n self.DFSUtil(adj, visited)\n \n\n def DFS(self, v): \n '''\n Parameters:\n v: the vertex (node) to start the DFS from\n The basic idea in graph DFS is to keep a list of which nodes you have visited,\n because unlike in a tree, a graph can have cycles.\n Use a visited array to avoid infinite loops\n ''' \n # Mark all the vertices as not visited \n visited = [False] * (len(self.graph)) \n \n # Call the recursive helper function \n # to print DFS traversal \n self.DFSUtil(v, visited) \n \n# Driver code \n \n# Create a graph given \n# in the above diagram \ng = Graph() \ng.addEdge(0, 1) \ng.addEdge(0, 2) \ng.addEdge(1, 2) \ng.addEdge(2, 0) \ng.addEdge(2, 3) \ng.addEdge(3, 3) \n \nprint(\"Following is DFS from (starting from vertex 2)\") \ng.DFS(2) " }, { "alpha_fraction": 0.5826416611671448, "alphanum_fraction": 0.6215319037437439, "avg_line_length": 35.36206817626953, "blob_id": "46af6266533a6f5bd666c6ad9960343416ad86cd", "content_id": "dad52ab8a3fa1f11f9ccd3cddb92f718a192bf55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4217, "license_type": "no_license", "max_line_length": 147, "num_lines": 116, "path": "/py/knapsack.py", "repo_name": "whoislewys/cookbook", "src_encoding": 
"UTF-8", "text": "import numpy as np\n\n\nclass Item(object):\n weight = 0\n value = 0\n\n def __init__(self, weight, value):\n self.weight = weight\n self.value = value\n\n def __repr__(self):\n return 'Item \\n' + 'weight: ' + str(self.weight) + '\\n' + 'value: ' + str(self.value) + '\\n'\n\n\ndef basic_knapsack(availableWeight, items, numItems):\n maxValue = 0\n for i, item in enumerate(items):\n if item.weight <= availableWeight:\n tempValue = basic_knapsack(availableWeight - item.weight, items, numItems)\n possibleMaxValue = tempValue + item.value\n if possibleMaxValue > maxValue:\n maxValue = possibleMaxValue\n return maxValue\n\n\n# Returns the maximum value that can be put in a knapsack with capacity availableWeight\ndef knapsack_01(availableWeight, items, n):\n if n == 0 or availableWeight == 0 : \n # base case\n return 0\n \n if (items[n-1].weight > availableWeight): \n # If weight of the nth item weighs more than the current knapsack capacity,\n # move on to the next item\n return knapsack_01(availableWeight, items, n-1) \n \n else: \n # return the maximum of two cases: \n # (1) nth item included \n # (2) not included \n # get combs with the nth item, making sure to subtract it's weight from the sack capacity\n temp1 = items[n-1].value + knapsack_01(availableWeight-items[n-1].weight, items, n-1)\n # get combs w/o the nth item\n temp2 = knapsack_01(availableWeight, items, n-1)\n return max(temp1, temp2)\n\n\n# originally i wanted to use a dictionary for faster lookup, but couldn't decide on what to use as a key\n# i dont think you can use availableWeight + n, because of a case where 2 different items have the same sum(availableWeight +n)\n# for ex. <Item>[Item(weight: 10, value: 1), Item(weight: 9, value: 200)]\n# the first item's availableWeight is 10, n is 0. 10 + 0 = 10.\n# the second item's availableWeight is 9, n is 1. 
9 + 1 = 10.\n# so, the memoization lookup would resolve to the value of whichever was computed first, totally ignoring the different values associated with them\n# maybe n * c, or another function would work? look into this laterrr\n\n# just use array for now\ndef knapsack_01_memoized(availableWeight, items, n, memo):\n if memo[availableWeight-1][n-1] != None:\n # before doiGng any calculation, check if the memo has done it already\n return memo[availableWeight-1][n-1]\n\n if n == 0 or availableWeight == 0 : \n # base case\n result = 0\n \n if (items[n-1].weight > availableWeight): \n # If weight of the nth item weighs more than the current knapsack capacity,\n # move on to the next item\n result = knapsack_01(availableWeight, items, n-1) \n \n else: \n # return the maximum of two cases: \n # (1) nth item included \n # (2) not included \n # get combs with the nth item, making sure to subtract it's weight from the sack capacity\n temp1 = items[n-1].value + knapsack_01(availableWeight-items[n-1].weight, items, n-1)\n # get combs w/o the nth item\n temp2 = knapsack_01(availableWeight, items, n-1)\n result = max(temp1, temp2)\n # memoize this return value before returning it\n memo[availableWeight-1][n-1] = result\n return result\n\n\n\nif __name__ == '__main__':\n capacity = 165\n items1 = [\n Item(39, 92),\n Item(22, 57),\n Item(29, 49),\n ]\n print(basic_knapsack(capacity, items1, len(items1))) \n\n # source: p01\n # https://people.sc.fsu.edu/~jburkardt/datasets/knapsack_01/knapsack_01.html\n items2 = [\n Item(23, 92),\n Item(31, 57),\n Item(29, 49),\n Item(44, 68),\n Item(53, 60),\n Item(38, 43),\n Item(63, 67),\n Item(85, 84),\n Item(89, 87),\n Item(82, 72)\n ]\n # solution = [1, 1, 1, 1, 0, 1, 0, 0, 0, 0]\n # total val: 309\n print(knapsack_01(capacity, items2, len(items2)))\n # todo: figure out dimensionality of these bastards\n memo = [[None for i in range(capacity)] for i in range(len(items2))]\n print(memo)\n print(knapsack_01_memoized(capacity, items2, 
len(items2), memo))" }, { "alpha_fraction": 0.6928104758262634, "alphanum_fraction": 0.6928104758262634, "avg_line_length": 18.125, "blob_id": "faf9ed6b944e9c3fb2119e096158b2d7048e26fb", "content_id": "c4627656a7a08a565bb00e8aa811e90ccc71de05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/py/requests_get.py", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "import requests\n\nurl = 'https://google.com'\nhtml = requests.get(url)\nprint(html.text)\n\nwith open('text.html', 'w+') as infile:\n infile.write(html.text)\n" }, { "alpha_fraction": 0.6625258922576904, "alphanum_fraction": 0.6763284802436829, "avg_line_length": 32.67441940307617, "blob_id": "fa6559ce04633034e4692ebb154453370f0e0566", "content_id": "83b77ecb54c40730ab87265c9b394291210641e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1452, "license_type": "no_license", "max_line_length": 193, "num_lines": 43, "path": "/pharo/pharo-notes.md", "repo_name": "whoislewys/cookbook", "src_encoding": "UTF-8", "text": "_______________________________________________________\n> NOTES on Pharo (a language of the smalltalk paradigm)\n _______________________________________________________\n\n Book on learning Pharo:\n http://books.pharo.org/updated-pharo-by-example/pdf/2018-09-29-UpdatedPharoByExample.pdf\n\n#### What is Pharo?\n Pharo is a lot of things\n\n * Simple dynamic language.\n\n * Erlang-like hotswap\n\n * IDE included\n\n * Nice std library. Now, with FFI! (they say, hopefully it's nice)\n\n* Hippie programmer culture (changes welcome, man)\n\n * Source control meant to be used with integrated `sources` file\n\n * [seems super useful ->] Generating image files for live snapshots of a running system.\n\n#### Open Questions\n\n * Why does it use a VM? 
Instead of compile to native bytecode?\n\n * Pattern matching? What does it mean 'receiving object decides what to do with message'?\n\n * Is Pharo owned by the NSA? why does it need full name to make a method?\n\n * How does the state persistence work? Is save and quit in Pharo like `Hibernate` or `Sleep` in an OS?\n\n * Is Smalltalk/Pharo even worth learning?\n\n#### General cool stuff\n\n* Clicking on something brings up snack/toast on keyboard shortcut to do that thing (might be annoying sometime but hey ๐Ÿคท)\n\n * WOWWW Smalltalk came before SQL. Smalltalk dev started in 1969, Codd from IBM published his spec for a RDBMS in 1970. Oracle database (first commercially successful RDBMS) came around 1979.\n\n * \n" } ]
10
elixdlol/lucid-dream
https://github.com/elixdlol/lucid-dream
3e1da6cd7a8dba734bea19855553bfb4519d8ba3
5604dab7ef224d11985645ab7eb88731e431f334
1a8ed88f855c4bafb847b6dfa091ac2a27049b43
refs/heads/master
2023-01-19T05:53:56.917405
2020-03-31T08:57:07
2020-03-31T08:57:07
219,281,732
0
0
null
2019-11-03T10:08:11
2020-03-31T08:57:31
2023-01-07T14:10:05
C#
[ { "alpha_fraction": 0.5999229550361633, "alphanum_fraction": 0.6035191416740417, "avg_line_length": 37.5445556640625, "blob_id": "d532a6c3add491939a592eaa3215ac3d5308132a", "content_id": "52907587d739cd3e52d7bd6c6f55a75e04e4329d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 7788, "license_type": "no_license", "max_line_length": 121, "num_lines": 202, "path": "/lucidDBManager/lucidDBManager/mongoDB/mongoDBServer.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing MongoDB.Driver;\nusing MongoDB.Bson;\nusing System.IO;\nusing Newtonsoft.Json.Bson;\nusing Newtonsoft.Json;\nusing System.Net;\nusing lucidDBManager.Data;\nusing MongoDB.Bson.IO;\nusing Newtonsoft.Json.Linq;\nusing static lucidDBManager.Data.BasicData;\n\nnamespace lucidDBManager.mongoDB\n{\n public class MongoDBServer\n {\n private IMongoDatabase _db;\n\n #region Main Functions\n public void initDB(string dbName)\n {\n MongoClient dbClient = new MongoClient(\"mongodb://127.0.0.1:27017\");\n\n //Database List \n var dbList = dbClient.ListDatabases().ToList();\n\n //Get Database and Collection \n _db = dbClient.GetDatabase(dbName);\n var collList = _db.ListCollections().ToList();\n }\n\n public void saveRecord(object message, string collectionName)\n {\n IMongoCollection<BsonDocument> collection = _db.GetCollection<BsonDocument>(collectionName);\n string jsonFile = Newtonsoft.Json.JsonConvert.SerializeObject(message);\n collection.InsertOne(BsonDocument.Parse(jsonFile));\n }\n\n public void deleteMessage(string collectionName, BsonDocument element)\n {\n IMongoCollection<BsonDocument> collection = _db.GetCollection<BsonDocument>(collectionName);\n\n collection.FindOneAndDelete(element);\n }\n\n\n public List<SystemTracks> getTracksByID(long trackID)\n {\n IMongoCollection<BsonDocument> collection = 
_db.GetCollection<BsonDocument>(\"SystemTrack\");\n var filter = Builders<BsonDocument>.Filter.Eq(\"systemTracks.trackID\", trackID);\n var result = collection.Find(filter).ToList();\n var listOfTMAMessage = new List<SystemTracks>();\n foreach (var mes in result)\n {\n var x = new JsonWriterSettings { OutputMode = JsonOutputMode.Strict };\n JObject json = JObject.Parse(mes.ToJson<MongoDB.Bson.BsonDocument>(x));\n\n SystemTracks tmaMessage = Newtonsoft.Json.JsonConvert.DeserializeObject<SystemTracks>(json.ToString());\n\n SystemTracks UpdatedMessge = new SystemTracks();\n UpdatedMessge.timeStamp = tmaMessage.timeStamp;\n\n bool isTrakIDFounded = false;\n\n for (int i = 0; i < tmaMessage.systemTracks.Count; i++)\n {\n\n if (tmaMessage.systemTracks[i].trackID == trackID)\n {\n isTrakIDFounded = true;\n TrackData neededTrack = new TrackData()\n {\n creationTime = tmaMessage.systemTracks[i].creationTime,\n relativeBearing = tmaMessage.systemTracks[i].relativeBearing,\n relativeBearingRate = tmaMessage.systemTracks[i].relativeBearingRate,\n trackID = tmaMessage.systemTracks[i].trackID,\n trackState = tmaMessage.systemTracks[i].trackState\n };\n\n UpdatedMessge.systemTracks = new List<TrackData>();\n UpdatedMessge.systemTracks.Add(neededTrack);\n }\n }\n if (isTrakIDFounded)\n {\n listOfTMAMessage.Add(UpdatedMessge);\n }\n\n }\n\n return listOfTMAMessage;\n }\n\n public List<SystemTracks> getFullTrackMessagesByTime(TimeType startTime, TimeType endTime)\n {\n List<SystemTracks> TrackMessages = new List<SystemTracks>();\n IMongoCollection<BsonDocument> collection = _db.GetCollection<BsonDocument>(\"SystemTrack\");\n var filter1 = Builders<BsonDocument>.Filter.Gt(\"timeStamp.seconds\", startTime.seconds);\n var filter2 = Builders<BsonDocument>.Filter.Lt(\"timeStamp.seconds\", endTime.seconds);\n var mainFilter = Builders<BsonDocument>.Filter.And(filter1, filter2);\n var result = collection.Find(mainFilter).ToList();\n\n foreach (var trackRecord in result)\n {\n var tmp = new 
JsonWriterSettings { OutputMode = JsonOutputMode.Strict };\n JObject json = JObject.Parse(trackRecord.ToJson<MongoDB.Bson.BsonDocument>(tmp));\n\n SystemTracks trackMessage = Newtonsoft.Json.JsonConvert.DeserializeObject<SystemTracks>(json.ToString());\n\n TrackMessages.Add(trackMessage);\n }\n\n return TrackMessages;\n }\n\n public List<OwnBoatData> getOwnBoatByTime(TimeType startTime, TimeType endTime)\n {\n List<OwnBoatData> OwnBoatMessages = new List<OwnBoatData>();\n IMongoCollection<BsonDocument> collection = _db.GetCollection<BsonDocument>(\"OwnBoat\");\n var filter1 = Builders<BsonDocument>.Filter.Gt(\"timeStamp.seconds\", startTime.seconds);\n var filter2 = Builders<BsonDocument>.Filter.Lt(\"timeStamp.seconds\", endTime.seconds);\n var mainFilter = Builders<BsonDocument>.Filter.And(filter1, filter2);\n var result = collection.Find(mainFilter).ToList();\n\n foreach (var ownBoatRecord in result)\n {\n var tmp = new JsonWriterSettings { OutputMode = JsonOutputMode.Strict };\n JObject json = JObject.Parse(ownBoatRecord.ToJson<MongoDB.Bson.BsonDocument>(tmp));\n\n OwnBoatData ownboatMessage = Newtonsoft.Json.JsonConvert.DeserializeObject<OwnBoatData>(json.ToString());\n\n OwnBoatMessages.Add(ownboatMessage);\n }\n\n return OwnBoatMessages;\n }\n\n public List<OwnBoatData> getAllOwnBoatData()\n {\n List<OwnBoatData> result = new List<OwnBoatData>();\n\n var allMessages = getMessages(\"OwnBoat\");\n\n foreach (var message in allMessages)\n {\n var x = new JsonWriterSettings { OutputMode = JsonOutputMode.Strict };\n JObject json = JObject.Parse(message.ToJson<MongoDB.Bson.BsonDocument>(x));\n\n OwnBoatData ownBoatRecord = Newtonsoft.Json.JsonConvert.DeserializeObject<OwnBoatData>(json.ToString());\n\n result.Add(ownBoatRecord);\n }\n\n return result;\n }\n\n #endregion\n #region Private Functions\n\n private string ToBson<T>(T value)\n {\n using (MemoryStream ms = new MemoryStream())\n using (BsonDataWriter datawriter = new BsonDataWriter(ms))\n {\n JsonSerializer 
serializer = new JsonSerializer();\n serializer.Serialize(datawriter, value);\n return Convert.ToBase64String(ms.ToArray());\n }\n\n }\n\n private T FromBson<T>(string base64data)\n {\n byte[] data = Convert.FromBase64String(base64data);\n\n using (MemoryStream ms = new MemoryStream(data))\n using (BsonDataReader reader = new BsonDataReader(ms))\n {\n JsonSerializer serializer = new JsonSerializer();\n return serializer.Deserialize<T>(reader);\n }\n }\n\n private List<BsonDocument> getMessages(string collectionName)\n {\n //READ \n IMongoCollection<BsonDocument> collection = _db.GetCollection<BsonDocument>(collectionName);\n var documentsList = collection.Find(new BsonDocument()).ToList();\n foreach (var item in documentsList)\n {\n Console.WriteLine(item.ToString());\n }\n\n return documentsList;\n }\n #endregion\n }\n}\n" }, { "alpha_fraction": 0.4563380181789398, "alphanum_fraction": 0.46197181940078735, "avg_line_length": 26.30769157409668, "blob_id": "b84b467ec8e089058190d69b504c25cad4d4e66c", "content_id": "7e103b3568f9e659f930183c054ae3055e942220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 91, "num_lines": 39, "path": "/TrackBeamParser/TrackBeamParser/CASSegmentManger.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public static class CASSegmentManger\n {\n private static CASSegment segToBuffer;\n\n public static void BufferManger(CASSubSegment subSegmentFromUdp)\n {\n if (subSegmentFromUdp.segID == 1)\n {\n if (segToBuffer != null)\n {\n if (segToBuffer.IsValid)\n {\n //if(segToBuffer.Data.Count < 192*64)\n //{\n BeamsBuffer.WriteBeamsFromDictionary(segToBuffer.GetBeamsValues());\n BeamsBuffer.Heading = segToBuffer.Heading;\n //}\n }\n }\n\n\n segToBuffer = new CASSegment(subSegmentFromUdp);\n }\n else\n {\n if 
(segToBuffer != null)\n {\n segToBuffer.AddSubSegment(subSegmentFromUdp);\n }\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.46352413296699524, "alphanum_fraction": 0.4753086566925049, "avg_line_length": 26.41538429260254, "blob_id": "47cbd5c566976e4d102f924b1e7f7ae509fc1f3e", "content_id": "9c8b07b7f463fd6d67aaeadd3c58ce0a97f198c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1784, "license_type": "no_license", "max_line_length": 93, "num_lines": 65, "path": "/TrackBeamParser/TrackBeamParser/CASSegment.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n class CASSegment\n {\n public bool IsValid;\n public List<byte> Data;\n public Dictionary<int, CASSubSegment> SubSegments;\n public double Heading { get; set; }\n\n public CASSegment(CASSubSegment subSegmentToAdd)\n {\n IsValid = true;\n Data = new List<byte>();\n SubSegments = new Dictionary<int, CASSubSegment>();\n Data.AddRange(subSegmentToAdd.data);\n SubSegments.Add(1, subSegmentToAdd);\n }\n\n public void AddSubSegment(CASSubSegment subSegmentToAdd)\n {\n if (subSegmentToAdd.segID == SubSegments.Last().Key + 1)\n {\n try\n {\n if(subSegmentToAdd.segID == 10)\n {\n Heading = BitConverter.ToInt16(subSegmentToAdd.heading);\n }\n Data.AddRange(subSegmentToAdd.data);\n SubSegments.Add(subSegmentToAdd.segID, subSegmentToAdd);\n }\n catch\n {\n IsValid = false;\n }\n \n }\n else\n {\n IsValid = false;\n }\n }\n\n public byte[][] GetBeamsValues()\n {\n byte[][] beams = new byte[192][];\n Dictionary<int, List<byte>> dictionaryToSend = new Dictionary<int, List<byte>>();\n\n for (int i = 0; i < 192; i++)\n {\n beams[i] = new byte[64];\n Array.Copy(Data.GetRange(i * 64, 64).ToArray(), beams[i], 64);\n }\n\n return beams;\n\n }\n\n }\n}\n" }, { "alpha_fraction": 0.6472846865653992, "alphanum_fraction": 0.6472846865653992, 
"avg_line_length": 24.676055908203125, "blob_id": "18dc7163a3ec8fe0c484cd1f5574d21590c2817a", "content_id": "2e60831786be125736de27f36e8f51fb8af4e8c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1825, "license_type": "no_license", "max_line_length": 88, "num_lines": 71, "path": "/lucidDBManager/LucidDream_BDT_Microservice/src/BDT_CAS_OriginalMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace LucidDreamSystem\n{\n public class BDT_CAS_OriginalMessage\n {\n public TimeStampType timeStamp = new TimeStampType();\n public List<OriginalSystemTrack> systemTracks = new List<OriginalSystemTrack>();\n }\n public class OriginalSystemTrack\n {\n public long trackId;\n public long state;\n public float bearing;\n public IsBearingRateValidType bearingRate = new IsBearingRateValidType();\n public float s_n_ratio;\n public float target_level;\n public long approach_receed_indicator;\n public long constant_bearing_warning;\n public FilterValidType bandwidth = new FilterValidType();\n public float integration_time;\n public long integration_time_nominal;\n public long integrat_time_selection_mode;\n public TimeStampType timeStamp = new TimeStampType();\n public List<AngleValidType> rawBearingCndidates = new List<AngleValidType>();\n }\n\n public class IsBearingRateValidType\n {\n public bool valid;\n public float value;\n }\n\n public class FilterValidType\n {\n public bool valid;\n public float lower;\n public float upper;\n }\n\n public class TimeStampType\n {\n public HmssType time = new HmssType();\n public YmdType date = new YmdType();\n }\n\n public class HmssType\n {\n public long hours;\n public long minutes;\n public long seconds;\n public long c_seconds;\n }\n\n public class YmdType\n {\n public long year;\n public long month;\n public long day;\n 
}\n\n public class AngleValidType\n {\n public bool valid;\n public float value;\n }\n}\n" }, { "alpha_fraction": 0.47756874561309814, "alphanum_fraction": 0.484804630279541, "avg_line_length": 24.127273559570312, "blob_id": "7e9dacf60030a9691a18692cba27ef2afe54919f", "content_id": "01f984354b84942baed1741c6b9ee1fa73568a21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 95, "num_lines": 55, "path": "/Simulator/BeamBusCas/BeamBusCasSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.Net;\nusing System.Net.Sockets;\n\nnamespace BeamBusCas\n{\n public static class BeamBusCasSender\n {\n static int subSegmentNum;\n static byte[][] subSements;\n static UDPSocket client;\n public static bool isSending = false;\n public static bool _isFirst = true;\n\n\n public static void SendMessage()\n {\n if (_isFirst)\n {\n _isFirst = false;\n subSements = FileEdit.GetRecording(Properties.Settings.Default.Recording_path);\n client = new UDPSocket();\n client.Client(Properties.Settings.Default.IP,\n Properties.Settings.Default.Port);\n subSegmentNum = subSements.Length;\n }\n\n\n while (isSending)\n {\n for (int j = 0; j < subSegmentNum; j++)\n {\n client.Send(subSements[j]);\n delayInMs(0.1);\n if (!isSending)\n {\n break;\n }\n }\n }\n }\n\n private static void delayInMs(double ms)\n {\n for (int i = 0; i < ms * 280000; i++)\n {\n\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 15.5, "blob_id": "8759cd05fe4d0b26fd35d03e3412853a38c88852", "content_id": "002987db1b73a6f8543a2d0731d31f3aaf2c38e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", 
"max_line_length": 18, "num_lines": 2, "path": "/README.md", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "# lucid-dream\nSubmarine Hackweek\n" }, { "alpha_fraction": 0.3713732957839966, "alphanum_fraction": 0.39209726452827454, "avg_line_length": 36.70833206176758, "blob_id": "e0f387a9f6fd6a6de4f864b50808d5839d2cf4a9", "content_id": "fb49bafeb3823b29f06bc8ded2992688557a8d95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3619, "license_type": "no_license", "max_line_length": 104, "num_lines": 96, "path": "/UI/NodejsDreamServer/rabbitMicroserviceStructure.js", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "var amqp = require('amqplib/callback_api');\n\nmodule.exports = {\n\n publish: function (exchangeName, routingKey, message, exchangeType) {\n\n //'amqp://localhost'\n amqp.connect('amqp://rutush:[email protected]', function (error0, connection) {\n if (error0) {\n throw error0;\n }\n connection.createChannel(function (error1, channel) {\n if (error1) {\n throw error1;\n }\n\n // var queueName = 'audioMessages';\n var msg = 'audio msg';\n\n channel.assertExchange(exchangeName, exchangeType, {\n durable: false\n });\n\n channel.publish(exchangeName, routingKey, Buffer.from(message));\n\n // channel.bindQueue(queueName, exchangeName, routingKey);\n\n console.log(\" [x] Sent %s\", message);\n\n });\n\n setTimeout(function () {\n connection.close();\n //process.exit(0);\n }, 500);\n\n });\n },\n\n consume: async function (handler, exchangeName, routingKey, queueName, exchangeType) {\n return new Promise(resolve => {\n //'amqp://rutush:[email protected]'\n amqp.connect('amqp://rutush:[email protected]', function (error0, connection) {\n if (error0) {\n console.log('error in connection');\n throw error0;\n }\n connection.createChannel(function (error1, channel) {\n if (error1) {\n throw error1;\n }\n\n channel.assertExchange(exchangeName, exchangeType, {\n 
durable: false\n });\n\n channel.assertQueue(queueName, function (error2, q) {\n // if (error2) {\n // throw error2;\n // }\n console.log(\" [*] Waiting for messages in %s. To exit press CTRL+C\", queueName);\n channel.bindQueue(queueName, exchangeName, routingKey);\n channel.consume(queueName, function (msg) {\n if (msg.content) {\n console.log(\" [x] %s\", msg.content.toString());\n message = msg.content.toString();\n handler(message);\n }\n console.log('.....');\n setTimeout(function () {\n console.log(\"Message:\", msg.content.toString());\n }, 4000);\n }, {\n noAck: true\n });\n\n resolve(channel);\n //return channel.consume(q.queue, function (msg) {\n // if (msg.content) {\n // console.log(\" [x] %s\", msg.content.toString());\n // message = msg.content.toString;\n // }\n // console.log('.....');\n // setTimeout(function () {\n // console.log(\"Message:\", msg.content.toString());\n // }, 4000);\n //},\n //{\n // noAck: true\n //});\n });\n });\n });\n });\n }\n}" }, { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.6976743936538696, "avg_line_length": 16.91666603088379, "blob_id": "9bf74e9315958d46dd0920809f90af902ffd32b9", "content_id": "bad75d9b995d4bda56d13852ec39860021565a24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 217, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/LiveAudioPlayer/LiveAudioPlayer/TrackWithStitchedBeam.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace LiveAudioPlayer\n{\n public class TrackWithStitchedBeam\n {\n public int TrackNum;\n public byte[] StitchedBeam;\n }\n}\n" }, { "alpha_fraction": 0.5572672486305237, "alphanum_fraction": 0.5572672486305237, "avg_line_length": 35.414634704589844, "blob_id": "71fc0e38792b3fbc403155913a3e28a0a22b7162", "content_id": "4128e0d0487e84b45f408944140e399c98dff6ab", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 104, "num_lines": 41, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/TrackWithStitchedBeamSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing BestTrackBeamSticher;\nusing Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace BestTrackBeamStitcher\n{\n public static class TrackWithStitchedBeamSender\n {\n static IModel TrackWithStitchedBeamChannel;\n\n static TrackWithStitchedBeamSender()\n {\n IConnection connection = RabbitMQConnection.getConnection();\n TrackWithStitchedBeamChannel = connection.CreateModel();\n\n TrackWithStitchedBeamChannel.ExchangeDeclare(exchange: \"trackWithStitchedBeamData\",\n type: ExchangeType.Fanout,\n durable: true,\n autoDelete: false,\n arguments: null);\n }\n\n public static void sendTrackWithStitchedBeam(TrackWithStitchedBeam trackWithStitchedBeam)\n {\n byte[] body = Encoding.Default.GetBytes(JsonConvert.SerializeObject(trackWithStitchedBeam));\n\n TrackWithStitchedBeamChannel.BasicPublish(exchange: \"trackWithStitchedBeamData\",\n routingKey: \"\",\n basicProperties: null,\n body: body);\n }\n\n public static void dispose()\n {\n TrackWithStitchedBeamChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.5886824131011963, "alphanum_fraction": 0.5895270109176636, "avg_line_length": 27.190475463867188, "blob_id": "c4f599f8e97e2c43cc6c4a6b340a2bb1fbfb84c3", "content_id": "7ca04279ba75e3c71dd2e4fe48f9e53a5b4b35fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/Simulator/GlobalResources/src/RabbitMQServer.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing RabbitMQ.Client;\nusing Newtonsoft.Json;\n\nnamespace GlobalResources\n{\n public class RabbitMQServer\n {\n ConnectionFactory Factory { get; set; }\n\n IConnection Connection { get; set; }\n\n IModel Channel { get; set; }\n\n protected string _exchangeName;\n\n public RabbitMQServer(string exchangName)\n {\n _exchangeName = exchangName;\n Factory = new ConnectionFactory() { HostName = \"localhost\" };\n Connection = Factory.CreateConnection();\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: _exchangeName, type: ExchangeType.Fanout);\n }\n\n public void SendMessage(object message)\n {\n string json = JsonConvert.SerializeObject(message);\n\n var body = Encoding.UTF8.GetBytes(json);\n\n Channel.BasicPublish(exchange: _exchangeName,\n routingKey: \"\",\n basicProperties: null,\n body: body);\n }\n }\n}\n" }, { "alpha_fraction": 0.7357512712478638, "alphanum_fraction": 0.7668393850326538, "avg_line_length": 16.545454025268555, "blob_id": "dff37466967c208cb158b3688cec66faff319a34", "content_id": "f087454ef01490460dd6792b9a328ad906fc59b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 195, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/lucidDBManager/LucidDream_TMA_Microservice/src/RabbitMQSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\n\nnamespace LucidDream_ManagedContractManagerSystem_VS2010x64.src\n{\n class RabbitMQSender\n {\n }\n}\n" }, { "alpha_fraction": 0.6383135914802551, "alphanum_fraction": 0.6627218723297119, "avg_line_length": 29.044445037841797, "blob_id": "5bc1a92f46ccc7e3a0fc756eace59923f09bf8d3", "content_id": "8629284ad12da02cd9788e756324ee6d71d6b393", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "TypeScript", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 120, "num_lines": 45, "path": "/UI/AngularDreamUI/src/app/ownboat/ownboat.component.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport { WebSocketService } from \".././websocket.service\";\n\n@Component({\n selector: 'app-ownboat',\n templateUrl: './ownboat.component.html',\n styleUrls: ['./ownboat.component.css']\n})\nexport class OwnboatComponent implements OnInit {\n\n //course = 228.770;\n roll = 228.770;\n pitch = 228.770;\n //propeller_rpm = 228.770;\n //diving_depth = 228.770;\n course_overe_ground = 0;\n heave = 0;\n heading = 0;\n\n ownboatTime='';\n constructor(private webSocketService: WebSocketService) { }\n\n ngOnInit() {\n\n this.webSocketService.listen('client_ownboatData').subscribe((data) => {\n // console.log(data);\n var ownboatObj = JSON.parse(data as string);\n console.log(data);\n console.log(ownboatObj);\n console.log(ownboatObj.course_overe_ground);\n\n // Get all ownboat data from server and present it \n this.roll = ownboatObj.roll;\n this.pitch = ownboatObj.pitch;\n\n this.course_overe_ground = ownboatObj.course_overe_ground;\n this.heave = ownboatObj.heave;\n this.heading = ownboatObj.heading;\n\n this.ownboatTime = ownboatObj.timeStamp.day + '\\\\' + ownboatObj.timeStamp.month + '\\\\' + ownboatObj.timeStamp.year\n + ' :' + ownboatObj.timeStamp.hours + ':' + ownboatObj.timeStamp.minutes + ':' + ownboatObj.timeStamp.seconds;\n });\n }\n\n}\n" }, { "alpha_fraction": 0.5833714008331299, "alphanum_fraction": 0.5911375284194946, "avg_line_length": 29.83098602294922, "blob_id": "ee7e850fb89d9f3a902fe294759388854f612e66", "content_id": "424fcce9cb26f0aa5e66206b3e77916622e5f27b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2191, "license_type": "no_license", "max_line_length": 215, 
"num_lines": 71, "path": "/Simulator/NavMessage/src/OriginalNavMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing static GlobalResources.BasicData;\n\nnamespace NavMessage\n{\n public class OriginalNavMessage\n {\n public TimeType timeStamp;\n public int timeZone;\n public float heading;\n public float headingRate;\n public float roll;\n public float rollRate;\n public float pitch;\n public float pitchRate;\n public float heave;\n public float heaveRate;\n public float course_overe_ground;\n\n public OriginalNavMessage()\n {\n GetGeneratedObject();\n }\n\n public OriginalNavMessage(TimeType timeStamp, int timeZone, float heading, float headingRate, float roll, float rollRate, float pitch, float pitchRate, float heave, float heaveRate,float course_overe_ground)\n {\n this.timeStamp = new TimeType();\n this.timeStamp.c_seconds = timeStamp.c_seconds;\n this.timeStamp.seconds = timeStamp.seconds;\n this.timeStamp.minutes = timeStamp.minutes;\n this.timeStamp.hours = timeStamp.hours;\n this.timeStamp.day = timeStamp.day;\n this.timeStamp.month = timeStamp.month;\n this.timeStamp.year = timeStamp.year;\n\n this.timeZone = timeZone;\n this.heading = heading;\n this.headingRate = headingRate;\n this.roll = roll;\n this.rollRate = rollRate;\n this.pitch = pitch;\n this.pitchRate = pitchRate;\n this.heave = heave;\n this.heaveRate = heaveRate;\n this.course_overe_ground = course_overe_ground;\n\n \n }\n private OriginalNavMessage GetGeneratedObject()\n {\n this.timeStamp = new TimeType();\n timeStamp.c_seconds = 1;\n timeStamp.seconds = 1;\n timeStamp.minutes = 1;\n timeStamp.hours = 1;\n timeStamp.day = 1;\n timeStamp.month = 1;\n timeStamp.year = 1;\n\n return new OriginalNavMessage(this.timeStamp, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1);\n }\n }\n\n \n\n \n}\n" }, { "alpha_fraction": 0.5210728049278259, 
"alphanum_fraction": 0.5210728049278259, "avg_line_length": 22.02941131591797, "blob_id": "d3dd2edc65e2a30b15ddb68acbbb197cf74bc86b", "content_id": "25e92e514bf819073b889b13b63a4a00d1d69cc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 785, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/LiveAudioPlayer/LiveAudioPlayer/Program.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Threading;\n\nnamespace LiveAudioPlayer\n{\n class Program\n {\n static void Main(string[] args)\n {\n Console.WriteLine(\"LiveAudioPlayer service\");\n\n LiveTrackBeamReciever.StartListening((liveBeam) =>\n {\n LiveAudioPlayer.LiveAudioStreamRecieved(liveBeam);\n });\n\n PlayerCommandsReciever.StartListening((command) =>\n {\n LiveAudioPlayer.CommandRecieved(command);\n });\n\n playDummyCommands();\n }\n\n private static void playDummyCommands()\n {\n while (true)\n {\n var c = Console.ReadLine();\n LiveAudioPlayer.CommandRecieved(c);\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.5519230961799622, "alphanum_fraction": 0.5526922941207886, "avg_line_length": 32.339744567871094, "blob_id": "dc11766d721bd900edc70c35da32850754439a3a", "content_id": "686b48757c044085dfbd9dc9f18ffab6e4c59b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5202, "license_type": "no_license", "max_line_length": 117, "num_lines": 156, "path": "/lucidDBManager/lucidDBManager/RabbitMQ/RabbitMQReciever.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing lucidDBManager.Data;\nusing Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing System.Threading;\n\nnamespace lucidDBManager.RabbitMQ\n{\n public class RabbitMQReciever\n {\n\n ConnectionFactory Factory { get; set; }\n\n IConnection Connection { 
get; set; }\n\n IModel Channel { get; set; }\n\n EventingBasicConsumer TMAConsumer { get; set; }\n\n EventingBasicConsumer OwnBoatConsumer { get; set; }\n\n EventingBasicConsumer EMCSConsumer { get; set; }\n\n EventingBasicConsumer ActionConsumer { get; set; }\n\n DataHandler DataHandler { get; set; }\n\n public RabbitMQReciever(DataHandler handler)\n {\n DataHandler = handler;\n Factory = new ConnectionFactory() { HostName = \"localhost\" };\n Connection = Factory.CreateConnection();\n\n InitFromGUIActionReceiver();\n }\n\n public void StartRecording()\n {\n InitFromUAGTMAReceiver();\n InitFromUAGOwnBoatReceiver();\n InitFromUAGEMCSReceiver();\n }\n\n public void StopRecording()\n {\n // Fix!!!!\n Channel.ExchangeDelete(\"TrackData\");\n Channel.ExchangeDelete(\"OwnBoatData\");\n }\n\n private void InitFromUAGTMAReceiver()\n {\n\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: \"TrackData\", type: ExchangeType.Fanout);\n Channel.QueueDeclare(\"UAGTrackDataQueue\");\n Channel.QueueBind(queue: \"UAGTrackDataQueue\",\n exchange: \"TrackData\",\n routingKey: \"\");\n\n TMAConsumer = new EventingBasicConsumer(Channel);\n TMAConsumer.Received += (model, ea) =>\n {\n var body = ea.Body;\n var message = Encoding.UTF8.GetString(body);\n\n TMAOriginalMessage tmaMessage = JsonConvert.DeserializeObject<TMAOriginalMessage>(message);\n\n\n DataHandler.ReceiveTMAData(tmaMessage);\n };\n\n Channel.BasicConsume(queue: \"UAGTrackDataQueue\",\n autoAck: true,\n consumer: TMAConsumer);\n }\n\n private void InitFromUAGOwnBoatReceiver()\n {\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: \"OwnBoatData\", type: ExchangeType.Fanout);\n Channel.QueueDeclare(\"UAGOwnBoatQueue\");\n Channel.QueueBind(queue: \"UAGOwnBoatQueue\",\n exchange: \"OwnBoatData\",\n routingKey: \"\");\n OwnBoatConsumer = new EventingBasicConsumer(Channel);\n OwnBoatConsumer.Received += (model, ea) =>\n {\n var body = ea.Body;\n var message = 
Encoding.UTF8.GetString(body);\n\n OwnBoatOriginalMessage ownMessage = JsonConvert.DeserializeObject<OwnBoatOriginalMessage>(message);\n\n DataHandler.ReceiveOwnBoatData(ownMessage);\n };\n\n\n Channel.BasicConsume(queue: \"UAGOwnBoatQueue\",\n autoAck: true,\n consumer: OwnBoatConsumer);\n }\n\n private void InitFromUAGEMCSReceiver()\n {\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: \"EMCSData\", type: ExchangeType.Fanout);\n Channel.QueueDeclare(\"UAGEMCSQueue\");\n Channel.QueueBind(queue: \"UAGEMCSQueue\",\n exchange: \"EMCSData\",\n routingKey: \"\");\n EMCSConsumer = new EventingBasicConsumer(Channel);\n EMCSConsumer.Received += (model, ea) =>\n {\n var body = ea.Body;\n var message = Encoding.UTF8.GetString(body);\n\n //OwnBoatOriginalMessage ownMessage = JsonConvert.DeserializeObject<OwnBoatOriginalMessage>(message);\n\n //DataHandler.ReceiveOwnBoatData(ownMessage);\n };\n\n\n Channel.BasicConsume(queue: \"UAGEMCSQueue\",\n autoAck: true,\n consumer: EMCSConsumer);\n }\n\n private void InitFromGUIActionReceiver()\n {\n\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: \"Action\", type: ExchangeType.Fanout);\n Channel.QueueDeclare(\"GuiActionQueue\");\n Channel.QueueBind(queue: \"GuiActionQueue\",\n exchange: \"Action\",\n routingKey: \"\");\n\n ActionConsumer = new EventingBasicConsumer(Channel);\n ActionConsumer.Received += (model, ea) =>\n {\n var body = ea.Body;\n var message = Encoding.UTF8.GetString(body);\n\n DataHandler.ReceiveActionMessage(message);\n };\n\n Channel.BasicConsume(queue: \"GuiActionQueue\",\n autoAck: true,\n consumer: ActionConsumer);\n }\n\n }\n}" }, { "alpha_fraction": 0.663335919380188, "alphanum_fraction": 0.663335919380188, "avg_line_length": 24.509803771972656, "blob_id": "434f42650770589fb4f7961a0b515c3c693d495b", "content_id": "9bef092b99167560d92a9b3dc49afe438a33145f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 
1303, "license_type": "no_license", "max_line_length": 56, "num_lines": 51, "path": "/lucidDBManager/lucidDBManager/Data/TMAOriginalMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing static lucidDBManager.Data.BasicOriginalData;\n\nnamespace lucidDBManager.Data\n{\n // Original system track message\n public class TMAOriginalMessage\n {\n public TimeStampType timeStamp;\n public List<OriginalSystemTrack> systemTracks;\n }\n \n public class OriginalSystemTrack\n {\n public long trackId;\n public long state;\n public float bearing;\n public IsBearingRateValidType bearingRate;\n public float s_n_ratio;\n public float target_level;\n public long approach_receed_indicator;\n public long constant_bearing_warning;\n public FilterValidType bandwidth;\n public float integration_time;\n public long integration_time_nominal;\n public long integrat_time_selection_mode;\n public TimeStampType timeStamp;\n public List<AngleValidType> rawBearingCndidates;\n }\n\n public class IsBearingRateValidType\n {\n public bool valid;\n public float value;\n }\n\n public class FilterValidType\n {\n public bool valid;\n public float lower;\n public float upper;\n }\n\n public class AngleValidType\n {\n public bool valid;\n public float value;\n }\n}\n" }, { "alpha_fraction": 0.4543859660625458, "alphanum_fraction": 0.4865497052669525, "avg_line_length": 27.032787322998047, "blob_id": "44594b52bdbd2ee878d086cba8b5fd6c8815b4df", "content_id": "6b30019a77b5cec830de30db6039f8ee7dbc7563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3422, "license_type": "no_license", "max_line_length": 82, "num_lines": 122, "path": "/Simulator/WaterfallSimulator/LineGenerator.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing 
System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace WaterfallSimulator\n{\n public class LineGenerator\n {\n private int diagonalPosition = 0;\n Random rnd = new Random();\n\n /// <summary>\n /// Generates a random bytes array.\n /// </summary>\n /// <returns></returns>\n public byte[] GenerateRandomLine()\n {\n byte[] data = new byte[4096];\n rnd.NextBytes(data);\n\n for (int i = 0; i < data.Length; i++)\n data[i] = (byte)rnd.Next(0, 150);\n\n validateData(ref data);\n AddGrid(ref data);\n AddDiagonalLine(ref data);\n return data;\n }\n\n private void validateData(ref byte[] data)\n {\n for (int i = 0; i < data.Length; i++)\n {\n if (data[i] == (byte)'P' || data[i] == (byte)'C')\n data[i] = 0;\n }\n }\n\n private void AddGrid(ref byte[] data)\n {\n for (int i = 0; i < data.Length; i += 256)\n {\n data[i] = 255;\n }\n }\n private void AddDiagonalLine(ref byte[] data)\n {\n if (diagonalPosition == 1000)\n {\n diagonalPosition = 0;\n }\n\n var diagonalPosition1 = (diagonalPosition + 250) % 1000;\n var diagonalPosition2 = (diagonalPosition + 500) % 1000;\n var diagonalPosition3 = (diagonalPosition + 750) % 1000;\n\n data[diagonalPosition] = 255;\n data[diagonalPosition1] = 255;\n data[diagonalPosition2] = 255;\n data[diagonalPosition3] = 255;\n\n if (diagonalPosition > 0)\n {\n data[diagonalPosition - 1] = 255;\n }\n if (diagonalPosition1 > 0)\n {\n data[diagonalPosition1 - 1] = 255;\n }\n if (diagonalPosition2 > 0)\n {\n data[diagonalPosition2 - 1] = 255;\n }\n if (diagonalPosition3 > 0)\n {\n data[diagonalPosition3 - 1] = 255;\n }\n\n\n\n diagonalPosition++;\n }\n\n private int currentLineNumber = 0;\n private int currentPageNumber = 0;\n public byte[] ReadLineFromWF()\n {\n byte[] tempData = new byte[4*4096];\n byte[] data = new byte[4096];\n\n\n var waterfallPage = FileManager.readWfPageFromFile(currentPageNumber);\n if (waterfallPage == null)\n {\n currentPageNumber = 0;\n waterfallPage = 
FileManager.readWfPageFromFile(currentPageNumber);\n }\n\n var linesList = waterfallPage.Values.ToList();\n\n if (currentLineNumber >= linesList.Count)\n {\n currentLineNumber = 0;\n currentPageNumber++;\n waterfallPage = FileManager.readWfPageFromFile(currentPageNumber);\n linesList = waterfallPage.Values.ToList();\n }\n var returnLineNumber = currentLineNumber;\n currentLineNumber++;\n\n for (int i = 0, j = 0; i < 4*4096; i = i+4, j++)\n {\n data[j] = linesList[returnLineNumber][i];\n }\n\n return data;\n }\n }\n}\n" }, { "alpha_fraction": 0.6602316498756409, "alphanum_fraction": 0.6702702641487122, "avg_line_length": 32.20512771606445, "blob_id": "ad3f8a7583f65ec8b209ab0381b3ad55e4333dbb", "content_id": "470df2028600c7dfcd8f56d2c24f0133d494c3c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1295, "license_type": "no_license", "max_line_length": 101, "num_lines": 39, "path": "/UI/AngularDreamUI/src/app/track-data/track-data.component.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport { WebSocketService } from '../websocket.service';\nimport { MessageService } from '../message.service';\n\n@Component({\n selector: 'app-track-data',\n templateUrl: './track-data.component.html',\n styleUrls: ['./track-data.component.css']\n})\nexport class TrackDataComponent implements OnInit {\n id = 770;\n bearing = 350.33;\n bearing_rate = 369.2;\n\n constructor(private webSocketService: WebSocketService, private messageService: MessageService) { }\n\n ngOnInit() {\n this.webSocketService.listen('client_trackData').subscribe((data) => {\n // console.log(data);\n var trackDataObj = JSON.parse(data as string);\n console.log(data);\n console.log(trackDataObj.systemTracks[0].trackID);\n console.log(trackDataObj.course_overe_ground);\n\n // Get the id that was clicked\n this.messageService.getMessage().subscribe(msg => {this.id = 
msg.text});\n\n // Check which id was clicked and show the data according to the id \n trackDataObj.systemTracks.forEach(trackData => {\n if (trackData.trackID == this.id) {\n this.id = trackData.trackID;\n this.bearing_rate = trackData.relativeBearingRate;\n this.bearing = trackData.relativeBearing;\n }\n });\n });\n }\n\n}\n" }, { "alpha_fraction": 0.582608699798584, "alphanum_fraction": 0.582608699798584, "avg_line_length": 19.909090042114258, "blob_id": "0995c48e24ce14d682d32cd81b896b54aed162da", "content_id": "56c7fd9b8af2a51364b58dccd053e1d5258ec7bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 462, "license_type": "no_license", "max_line_length": 34, "num_lines": 22, "path": "/Simulator/GlobalResources/src/BasicData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace GlobalResources\n{\n public class BasicData\n {\n public struct TimeType\n {\n public long hours;\n public long minutes;\n public long seconds;\n public long c_seconds;\n public long year;\n public long month;\n public long day;\n }\n }\n}\n" }, { "alpha_fraction": 0.5009810328483582, "alphanum_fraction": 0.5052321553230286, "avg_line_length": 31.53191566467285, "blob_id": "4b547f4bc8575aa8e2aa54e758c6188e64c77fe1", "content_id": "f7fa29c0e4ed54bc6e0fc3d1bb7594007ef66f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3060, "license_type": "no_license", "max_line_length": 123, "num_lines": 94, "path": "/LiveAudioPlayer/LiveAudioPlayer/LiveAudioPlayer.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing NAudio.Utils;\nusing NAudio.Wave;\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Text;\nusing System.Threading;\n\nnamespace LiveAudioPlayer\n{\n public static 
class LiveAudioPlayer\n {\n static int currentPlayingTrackId;\n static bool playing;\n static bool trackChanged;\n static WaveOutEvent player;\n static WaveFormat waveFormat;\n static MemoryStream stream;\n static WaveFileWriter waveFileWriter;\n\n static LiveAudioPlayer()\n {\n currentPlayingTrackId = -1;\n playing = false;\n trackChanged = false;\n player = new WaveOutEvent();\n waveFormat = new WaveFormat(31250, 16, 1);\n stream = new MemoryStream();\n waveFileWriter = new WaveFileWriter(new IgnoreDisposeStream(stream), waveFormat);\n }\n\n public static void LiveAudioStreamRecieved(TrackWithStitchedBeam liveBeam)\n {\n if (liveBeam.TrackNum == currentPlayingTrackId)\n {\n if (playing)\n {\n writeToStream(liveBeam);\n\n if (trackChanged)\n {\n // start playing the audio with delay to avoid getting to end of stream before the next stream part\n Thread.Sleep(100);\n var provider = new RawSourceWaveStream(stream, waveFormat);\n player.Init(provider);\n player.Play();\n trackChanged = false;\n Console.WriteLine($\"Now playing track: {currentPlayingTrackId}\");\n }\n }\n }\n }\n\n private static void writeToStream(TrackWithStitchedBeam liveBeam)\n {\n var position = stream.Position;\n stream.Position = stream.Length;\n waveFileWriter.Write(liveBeam.StitchedBeam, 0, liveBeam.StitchedBeam.Length);\n waveFileWriter.Flush();\n stream.Position = position;\n }\n\n public static void CommandRecieved(string command)\n {\n Console.WriteLine($\"Recieved command: {command}\");\n\n if (command == \"stop\")\n {\n playing = false;\n }\n else\n {\n try\n {\n int trackId = int.Parse(command);\n playing = true;\n\n if (trackId != currentPlayingTrackId)\n {\n trackChanged = true;\n currentPlayingTrackId = trackId;\n player.Stop();\n stream = new MemoryStream();\n waveFileWriter = new WaveFileWriter(new IgnoreDisposeStream(stream), waveFormat);\n }\n }\n catch (Exception)\n {\n Console.WriteLine(\"Recieved bad command...\");\n }\n }\n }\n }\n}\n" }, { "alpha_fraction": 
0.4573391079902649, "alphanum_fraction": 0.46963122487068176, "avg_line_length": 35.880001068115234, "blob_id": "7d61ea784e3c212e5358161fe23768716d558ce6", "content_id": "36df8ce34ac7e57aabc64a7a863ad4d8308a9f23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3122, "license_type": "no_license", "max_line_length": 140, "num_lines": 75, "path": "/Simulator/WaterfallSimulator/FileManager.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.IO;\nusing System.Runtime.Serialization.Formatters.Binary;\nusing WaterfallSimulator.Properties;\n\nnamespace WaterfallSimulator\n{\n public static class FileManager\n {\n /*\n Presentation of the data that is stored in one line:\n โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„\n โ–Œ 8 bytes โ– 4 bytes โ– <= 4096 bytes โ–\n โ–Œ Time Info โ– WF Data Length โ– Actual Waterfall Data โ–\n โ–Œ โ–ผ โ– โ–ผ โ– โ–ผ โ–\n โ–Œ [0] [1] [2] [3] [4] [5] [6] [7]โ– [8] [9] [10] [11] โ– [12] - [4108] โ–\n โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„โ–„\n\n */\n\n private static BinaryFormatter bformatter = new BinaryFormatter();\n\n //number of bytes of timeInfo at the beginning of the line represents the miliseconds in long type variable which spread to 8 bytes.\n public const int TimeInfoLength = 8;\n \n \n public static Dictionary<DateTime, byte[]> readWfPageFromFile(int numberOfPage)\n {\n string filePath = Settings.Default.FolderPath + \"/\" + 
numberOfPage + \".wf\";\n Dictionary<DateTime, byte[]> page = null;\n\n if (File.Exists(filePath))\n {\n page = new Dictionary<DateTime, byte[]>();\n FileStream wfPageFile = File.Open(filePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);\n\n byte[] line = new byte[Settings.Default.Page_Width];\n byte[] timeBuffer = new byte[TimeInfoLength];\n\n bool isPageFinished = false;\n\n for (int i = 0; i < Settings.Default.Page_Height && !isPageFinished; ++i)\n {\n wfPageFile.Read(timeBuffer, 0, TimeInfoLength);\n DateTime time = DateTime.FromBinary(BitConverter.ToInt64(timeBuffer.Take(TimeInfoLength).ToArray(), 0));\n\n if (time == DateTime.MinValue)\n {\n isPageFinished = true;\n }\n else\n {\n wfPageFile.Read(line, 0, Settings.Default.Page_Width);\n\n page.Add(time, line);\n\n line = new byte[Settings.Default.Page_Width];\n timeBuffer = new byte[TimeInfoLength];\n }\n }\n\n wfPageFile.Close();\n }\n\n return page;\n }\n \n\n \n }\n}\n" }, { "alpha_fraction": 0.4778420031070709, "alphanum_fraction": 0.4971098303794861, "avg_line_length": 18.961538314819336, "blob_id": "7339ed87303ac23691dda3a6411adafe6f328549", "content_id": "a6b306f195d14a8f90e00b1bb1911c522c718b63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 521, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/Simulator/WaterfallSimulator/LSRHeaderGenerator.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace WaterfallSimulator\n{\n public class LSRHeaderGenerator\n {\n public byte[] CreateHeader()\n {\n byte[] header = new byte[24];\n\n header[0] = 0;\n header[1] = 4;\n header[2] = 4;\n\n for (int i = 3; i < header.Length; i++)\n {\n header[i] = 0;\n }\n return header;\n }\n }\n}\n" }, { "alpha_fraction": 0.5876923203468323, "alphanum_fraction": 0.5876923203468323, 
"avg_line_length": 23.375, "blob_id": "5d18df8fe1a6aa606ec11fe1485deecb13e547a5", "content_id": "4992bcc6b1d0bf1dec8a20004f42344a3477ded1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 977, "license_type": "no_license", "max_line_length": 56, "num_lines": 40, "path": "/lucidDBManager/lucidDBManager/Manager.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing lucidDBManager.mongoDB;\nusing lucidDBManager.RabbitMQ;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace lucidDBManager\n{\n public class Manager\n {\n private DataHandler Handler { get; set; }\n\n private RabbitMQReciever Receiver { get; set; }\n\n private RabbitMQSender Sender { get; set; }\n\n private MongoDBServer DB { get; set; }\n\n public Manager()\n {\n DB = new MongoDBServer();\n DB.initDB(\"Lucid\");\n Sender = new RabbitMQSender();\n Handler = new DataHandler(Sender, DB, this);\n Receiver = new RabbitMQReciever(Handler);\n }\n\n public void StartReceivingUAG()\n {\n Console.WriteLine(\"StartReceivingUAG\");\n Receiver.StartRecording();\n }\n\n public void StopReceivingUAG()\n {\n Console.WriteLine(\"StopReceivingUAG\");\n Receiver.StopRecording();\n }\n }\n}\n" }, { "alpha_fraction": 0.7120742797851562, "alphanum_fraction": 0.7120742797851562, "avg_line_length": 18, "blob_id": "ff0e5eef4e1e3ea79fab0a4305a616d10b39f867", "content_id": "31111adcca64bc42149bd27fecbae1f7f0494075", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 325, "license_type": "no_license", "max_line_length": 71, "num_lines": 17, "path": "/Simulator/BdtCasMessage/src/BdtCasRabbitMQ.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing GlobalResources;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace BdtCasMessage\n{\n public class BdtCasRabbitMQ 
: RabbitMQServer\n {\n public BdtCasRabbitMQ(string exchangeName) : base(exchangeName)\n {\n\n }\n }\n}\n" }, { "alpha_fraction": 0.65684574842453, "alphanum_fraction": 0.65684574842453, "avg_line_length": 25.227272033691406, "blob_id": "7f1159ecbaf4ee6ab939c2f541a725805ab8aea2", "content_id": "a4abee7a2c9cfc47c2584d345950a5ecad2821b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 579, "license_type": "no_license", "max_line_length": 93, "num_lines": 22, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/Program.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing BestTrackBeamStitcher;\nusing NAudio.Utils;\nusing NAudio.Wave;\nusing System;\nusing System.IO;\n\nnamespace BestTrackBeamSticher\n{\n class Program\n {\n static void Main(string[] args)\n {\n Console.WriteLine(\"BestTrackBeamSticher service\");\n\n TrackBeamDataReciever.StartListening((trackBeamData) =>\n {\n TrackWithStitchedBeam trackWithStitchedBeam = Stitcher.stitch(trackBeamData);\n TrackWithStitchedBeamSender.sendTrackWithStitchedBeam(trackWithStitchedBeam);\n }); \n }\n }\n}\n" }, { "alpha_fraction": 0.39521345496177673, "alphanum_fraction": 0.4165588617324829, "avg_line_length": 23.935483932495117, "blob_id": "1ab82342d680d2f14efc006253a3a99cb99d5d06", "content_id": "805512e1f928221066796c67cd19514a1cc51d91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1548, "license_type": "no_license", "max_line_length": 84, "num_lines": 62, "path": "/TrackBeamParser/TrackBeamParser/CASSubSegment.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public class CASSubSegment\n {\n public int segID;\n public byte[] header;\n public byte[] data;\n public byte[] heading;\n\n public CASSubSegment(byte[] subSegment)\n {\n int id, isTen;\n 
int.TryParse(subSegment[4].ToString(), out id);\n segID = id;\n int dataStartPoint;\n int headingStartPoint = 1224;\n\n isTen = 0;\n switch (id)\n {\n case 1:\n dataStartPoint = 256;\n break;\n\n case 10:\n isTen = 16;\n dataStartPoint = 32;\n break;\n\n default:\n dataStartPoint = 32;\n break;\n }\n header = new byte[dataStartPoint];\n data = new byte[1400 - dataStartPoint - isTen];\n heading = new byte[2];\n\n for (int i = 0; i < dataStartPoint; i++)\n {\n header[i] = subSegment[i];\n }\n\n for (int j = 0; j < 1400 - dataStartPoint - isTen; j++)\n {\n data[j] = subSegment[dataStartPoint + j];\n }\n\n if (segID == 10)\n {\n for (int i = 0; i < 2; i++)\n {\n heading[i] = subSegment[headingStartPoint + dataStartPoint + i];\n }\n }\n\n }\n }\n}\n" }, { "alpha_fraction": 0.7067669034004211, "alphanum_fraction": 0.7067669034004211, "avg_line_length": 12.300000190734863, "blob_id": "bf250f5af06b12def081fe28d8e6ae2afc0eca45", "content_id": "f50235d3699c70783789b7c5ebfd23217feb22a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 135, "license_type": "no_license", "max_line_length": 33, "num_lines": 10, "path": "/lucidDBManager/lucidDBManager/Data/IPSData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace lucidDBManager.Data\n{\n class IPSData\n {\n }\n}\n" }, { "alpha_fraction": 0.5666041374206543, "alphanum_fraction": 0.5672295093536377, "avg_line_length": 33.021278381347656, "blob_id": "3136f6a37d53c7d66e1d858ee523b7cd13c8feb4", "content_id": "909edb8cd0b6ef4b34dd5ee533bcc4a688e2fb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1601, "license_type": "no_license", "max_line_length": 116, "num_lines": 47, "path": "/TrackBeamParser/TrackBeamParser/TracksDataReceiver.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing 
Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public class TracksDataReceiver\n {\n static IModel trackDataChannel;\n\n public static void StartListening(Action<SystemTracks> funcThatWantTheData)\n {\n IConnection connection = RabbitMQConnection.getConnection();\n trackDataChannel = connection.CreateModel();\n\n trackDataChannel.ExchangeDeclare(exchange: \"LucidTrackData\", type: ExchangeType.Fanout);\n\n trackDataChannel.QueueDeclare(queue: \"track\",\n durable: false,\n exclusive: false,\n autoDelete: true,\n arguments: null);\n\n trackDataChannel.QueueBind(queue: \"track\", exchange: \"LucidTrackData\", routingKey: \"\");\n\n var consumer = new EventingBasicConsumer(trackDataChannel);\n consumer.Received += (model, ea) =>\n {\n byte[] body = ea.Body;\n SystemTracks trackData = JsonConvert.DeserializeObject<SystemTracks>(Encoding.UTF8.GetString(body));\n funcThatWantTheData(trackData);\n };\n\n trackDataChannel.BasicConsume(queue: \"track\",\n autoAck: true,\n consumer: consumer);\n }\n\n public static void stopListening()\n {\n trackDataChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.5300574898719788, "alphanum_fraction": 0.5305802226066589, "avg_line_length": 36.509803771972656, "blob_id": "603e40b094d436093c935b446e1e07fab65a27c7", "content_id": "5484c0b227729aad73c53966ef68129de21ee202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1915, "license_type": "no_license", "max_line_length": 133, "num_lines": 51, "path": "/LiveAudioPlayer/LiveAudioPlayer/LiveTrackBeamReciever.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace LiveAudioPlayer\n{\n public static class 
LiveTrackBeamReciever\n {\n static IModel liveBeamChannel; \n\n public static void StartListening(Action<TrackWithStitchedBeam> funcThatWantTheData)\n {\n IConnection connection = RabbitMQConnection.getConnection();\n liveBeamChannel = connection.CreateModel();\n\n liveBeamChannel.ExchangeDeclare(exchange: \"trackWithStitchedBeamData\",\n type: ExchangeType.Fanout,\n durable: true,\n autoDelete: false,\n arguments: null);\n\n liveBeamChannel.QueueDeclare(queue: \"liveBeamData\",\n durable: true,\n exclusive: false,\n autoDelete: false,\n arguments: null);\n\n liveBeamChannel.QueueBind(queue: \"liveBeamData\", exchange: \"trackWithStitchedBeamData\", routingKey: \"\");\n\n var consumer = new EventingBasicConsumer(liveBeamChannel);\n consumer.Received += (model, ea) =>\n {\n byte[] body = ea.Body;\n TrackWithStitchedBeam liveBeam = JsonConvert.DeserializeObject<TrackWithStitchedBeam>(Encoding.UTF8.GetString(body));\n funcThatWantTheData(liveBeam);\n };\n\n liveBeamChannel.BasicConsume(queue: \"liveBeamData\",\n autoAck: true,\n consumer: consumer);\n }\n\n public static void stopListening()\n {\n liveBeamChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.6747408509254456, "alphanum_fraction": 0.6782324314117432, "avg_line_length": 52.911766052246094, "blob_id": "214402a96eb503f0f17fbf212261fde5f19aa044", "content_id": "0d71c97b1b98b1d91330c6820641c334ee43cbf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 18330, "license_type": "no_license", "max_line_length": 327, "num_lines": 340, "path": "/lucidDBManager/LucidDream_BDT_Microservice/src/LucidDream_BDT_Managed.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n// Generated at : 31/10/2019 15:16:17 ,in TIK46593 PC ,by liran 
harari\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\nusing System;\nusing LucidDreamContractManager_Managed;\nusing DDS_ManagedShell;\nusing LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data;\nusing Newtonsoft.Json;\n\nnamespace LucidDreamSystem\n{\n class LucidDreamSystemClient\n {\n \n #region Data members\n\t\t// contarct file path\n static string contractPath = \"../config/LucidDream_ContractManagerSystem_Contract.xml\";\n\n //contract Manager\n private LucidDreamContractManager m_LucidDreamContractManager;\n\n #region DataWriters\n private LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataWriter m_BdtCasDataWriter;\n #endregion\n\n #region DataReaders\n private LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataReader m_BdtCasDataReader;\n #endregion\n #endregion\n\n #region Properties\n\n public RabbitMQSender rabbit;\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataWriter BdtCasDataWriter\n {\n get { return m_BdtCasDataWriter; }\n set { m_BdtCasDataWriter = value; }\n }\n\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataReader BdtCasDataReader\n {\n get { return m_BdtCasDataReader; }\n set { m_BdtCasDataReader = value; }\n }\n\n\n #endregion\n\n public void SetupDataWriters()\n\t\t{\n BdtCasDataWriter = m_LucidDreamContractManager.Get_LucidDreamParticipant_BdtCasDataWriter();\n }\n\n public void SetupDataReaders()\n\t\t{\n BdtCasDataReader = m_LucidDreamContractManager.Get_LucidDreamParticipant_BdtCasDataReader();\n }\n\n public void RegistrationToEvents()\n\t\t{\n\n\n #region Register BdtCasDataWriter events\n BdtCasDataWriter.OnLivelinessLost += new 
WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n BdtCasDataWriter.OnOfferedDeadlineMissed += new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n BdtCasDataWriter.OnOfferedIncompatibleQOS += new OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n BdtCasDataWriter.OnPublicationMatched += new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n #endregion\n\n #region Register BdtCasDataReader events\n BdtCasDataReader.OnSampleArrived += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeSampleArrivedHandler(BdtCasDataReaderOnSampleArrived);\n BdtCasDataReader.OnInstanceNotAliveNoWriters += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeInstanceNotAliveNoWritersHandler(BdtCasDataReaderOnInstanceNotAliveNoWriters);\n BdtCasDataReader.OnLivelinessGained += new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n BdtCasDataReader.OnLivelinessLost += new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n BdtCasDataReader.OnRequestedDeadlineMissed += new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n BdtCasDataReader.OnRequestedIncompatibleQos += new DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n BdtCasDataReader.OnSampleLost += new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n BdtCasDataReader.OnSampleRejected += new DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n BdtCasDataReader.OnSubscriptionMatched += new DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n #endregion\n\n\n }\n\n public void Init()\n {\n m_LucidDreamContractManager = new LucidDreamContractManager();\n rabbit = new RabbitMQSender();\n \n m_LucidDreamContractManager.LoadFromFile(contractPath);\n m_LucidDreamContractManager.VivifyAll();\n SetupDataWriters();\n 
SetupDataReaders();\n RegistrationToEvents();\n }\n \n public void EnableAll()\n {\n\t\t\tm_LucidDreamContractManager.EnableAll();\n }\n \n public void Publish()\n {\n LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_type idl_idde_itfmod_to_3pa_bdt_track_data_idde_itfmod_to_3pa_bdt_track_data_type = new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_type();\n\n\n while(true)\n {\n System.Threading.Thread.Sleep(1000);\n }\n }\n\n public void Shutdown()\n {\n\n\n #region Unregister BdtCasDataWriter events\n BdtCasDataWriter.OnLivelinessLost -= new WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n BdtCasDataWriter.OnOfferedDeadlineMissed -= new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n BdtCasDataWriter.OnOfferedIncompatibleQOS -= new OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n BdtCasDataWriter.OnPublicationMatched -= new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n #endregion\n\n #region Unregister BdtCasDataReader events\n BdtCasDataReader.OnSampleArrived -= new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeSampleArrivedHandler(BdtCasDataReaderOnSampleArrived);\n BdtCasDataReader.OnInstanceNotAliveNoWriters -= new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeInstanceNotAliveNoWritersHandler(BdtCasDataReaderOnInstanceNotAliveNoWriters);\n BdtCasDataReader.OnLivelinessGained -= new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n BdtCasDataReader.OnLivelinessLost -= new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n BdtCasDataReader.OnRequestedDeadlineMissed -= new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n BdtCasDataReader.OnRequestedIncompatibleQos -= new 
DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n BdtCasDataReader.OnSampleLost -= new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n BdtCasDataReader.OnSampleRejected -= new DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n BdtCasDataReader.OnSubscriptionMatched -= new DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n #endregion\n\n m_LucidDreamContractManager.SubdueAll();\n m_LucidDreamContractManager.UnLoad();\n }\n \n #region Events\n #region General DataWriters Event\n /*! This event is raised when the DataWriter failed to write data\n within the time period set in its DeadlineQosPolicy */\n public void OnOfferedDeadlineMissedHendler(DataWriter dataWriter , OfferedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" missed its offered deadline \" + status.TotalCountChanged + \" times.\");\n }\n\n /*! This event is raised when the DataWriter failed to signal its liveliness\n within the time specified by the LivelinessQosPolicy */\n public void OnLivelinessLostHendler(DataWriter dataWriter, LivelinessLostStatus status)\n {\n Console.WriteLine(\"Liveliness Lost on DataWriter: \" + dataWriter.Name);\n }\n\n /*! This event is raised when the DataWriter discovered a DataReader for\n the same topic, but the DataReader had requested Qos settings incompatible \n with this DataWriter's offered Qos */\n public void OnOfferedIncompatibleQOSHendler(DataWriter dataWriter,OfferedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" QoS policies, that were incompatible with remote DataReader. Check contracts on both sides.\");\n }\n \n\t\t/*! 
This event is raised when the DataWriter discovered a matching DataReader */\n public void OnPublicationMatchedHendler(DataWriter dataWriter, PublicationMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" Publication matched\");\n }\n else\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" DataReader lost\");\n }\n }\n #endregion\n\n #region General DataReaders Event\n /*! This event is raised when the DataReader did not receive\n\t\ta new sample for an data-instance within the time period \n\t\tset in the DataReader's DeadlineQosPolicy */\n public void OnRequestedDeadlineMissedEventHendler(DataReader dataReader, RequestedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"requested deadline missed on DataReader \" + dataReader.Name +\n \"requested deadline that was not respected by DataWriter \" +\n status.TotalCountChanged);\n }\n\n\t\t/*! This event is raised when the number of matched DataWriters that are \n currently alive changed from any number to 0 */\n public void OnLivelinessLostEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"Liveliness lost of \" + dataReader.Name);\n }\n \n\t\t/*! This event is raised when the number of matched DataWriters that are \n currently alive increased from 0 to 1 */\n public void OnLivelinessGainedEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"a new Liveliness gained of \" + dataReader.Name);\n }\n\n\t\t/*! 
This event is raised when the DataReader discovered a dataWriter for \n the same Topic, but that DataReader has requested Qos settings incompatible\n with this DataWriter's offered Qos */\n public void OnRequestedIncompatibleQosEventHendler(DataReader dataReader, RequestedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"Incompatible Qos on topic \" + dataReader.Name +\n\t\t\t \" QoS policies, that were inconsistent with DataWriter. Check contracts on both sides.\");\n }\n\n /*! This event is raised when one or more samples received from the DataWriter\n have been dropped by the DataReader */\n public void OnSampleRejectedEventHendler(DataReader dataReader, SampleRejectedStatus status)\n {\n\t\t\tConsole.WriteLine(\" sample rejected in DataReader: \" + dataReader.Name + \"the reason is: \" +\n \"samples were rejected. Usually this happens when DataReader's memory resources are exhausted.\");\n }\n \n\t\t/*! This event is raised when one or more samples received from the DataWriter\n have failed to be received */\n public void OnSampleLostEventHendler(DataReader dataReader, SampleLostStatus status)\n {\n\t\t\tConsole.WriteLine(\"\\n\" + status.TotalCountChanged + \" Sample lost on DataReader: \" + dataReader.Name +\n\t\t\t \"\\n until now \" + status.TotalCount + \" samples lost\" + \n\t\t\t \"\\n samples were lost. Usually this happens when DataWriter writes faster than DataReader reads.\");\n\n }\n \n\t\t/*! 
This event is raised when the DataReader discovered a matching DataWriter */\n public void OnSubscriptionMatchedEventHendler(DataReader dataReader, SubscriptionMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" subscription matched\");\n }\n else\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" an existing matched DataWriter has been deleted\");\n }\n }\n #endregion\n\n #region Specific DataReaders Events\n #region BdtCasDataReader Events\n public void BdtCasDataReaderOnSampleArrived(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_type dataType, SampleInfo info, ValidityStatus validity)\n {\n\n BDT_CAS_OriginalMessage converted_data = ConvertData(dataType);\n string data = JsonConvert.SerializeObject(converted_data);\n rabbit.SendData(data);\n Console.WriteLine(\"a new sample of \\\"idde_itfmod_to_3pa_bdt_track_data_type\\\" has arrived\");\n }\n\n\n public void BdtCasDataReaderOnInstanceNotAliveNoWriters(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data.idde_itfmod_to_3pa_bdt_track_data_type dataType, SampleInfo info)\n {\n Console.WriteLine(\"an instance of \\\"idde_itfmod_to_3pa_bdt_track_data_type\\\" has lost all its writers\");\n }\n\n #endregion\n #endregion\n #endregion\n\n public BDT_CAS_OriginalMessage ConvertData(idde_itfmod_to_3pa_bdt_track_data_type message)\n {\n BDT_CAS_OriginalMessage newDataClass = new BDT_CAS_OriginalMessage();\n\n newDataClass.timeStamp.time.hours = message.time_reference.time.hours;\n newDataClass.timeStamp.time.minutes = 
message.time_reference.time.minutes;\n newDataClass.timeStamp.time.seconds = message.time_reference.time.seconds;\n newDataClass.timeStamp.time.c_seconds = message.time_reference.time.c_seconds;\n\n newDataClass.timeStamp.date.day = message.time_reference.date.day;\n newDataClass.timeStamp.date.month = message.time_reference.date.month;\n newDataClass.timeStamp.date.year = message.time_reference.date.year;\n\n for (int i = 0; i < message.track_data.Length; i++)\n {\n OriginalSystemTrack currTrack = new OriginalSystemTrack();\n currTrack.approach_receed_indicator = message.track_data.get_Item((uint)i).approach_receed_indicator;\n currTrack.bandwidth.valid = message.track_data.get_Item((uint)i).bandwidth.valid;\n currTrack.bandwidth.upper = message.track_data.get_Item((uint)i).bandwidth.upper;\n currTrack.bandwidth.lower = message.track_data.get_Item((uint)i).bandwidth.lower;\n currTrack.bearing = message.track_data.get_Item((uint)i).bearing;\n currTrack.bearingRate.valid = message.track_data.get_Item((uint)i).bearing_rate.valid;\n currTrack.bearingRate.value = message.track_data.get_Item((uint)i).bearing_rate.value;\n currTrack.constant_bearing_warning = message.track_data.get_Item((uint)i).constant_bearing_warning;\n currTrack.integration_time = message.track_data.get_Item((uint)i).integration_time;\n currTrack.integration_time_nominal = message.track_data.get_Item((uint)i).integration_time_nominal;\n currTrack.integrat_time_selection_mode = message.track_data.get_Item((uint)i).integrat_time_selection_mode;\n\n for (int j = 0; j < message.track_data.get_Item((uint)i).raw_bearing_candidates.Length; j++)\n {\n currTrack.rawBearingCndidates.Add(new AngleValidType());\n currTrack.rawBearingCndidates[j].valid = message.track_data.get_Item((uint)i).raw_bearing_candidates.get_Item((uint)j).valid;\n currTrack.rawBearingCndidates[j].value = message.track_data.get_Item((uint)i).raw_bearing_candidates.get_Item((uint)j).value;\n }\n\n currTrack.state = 
message.track_data.get_Item((uint)i).state;\n currTrack.s_n_ratio = message.track_data.get_Item((uint)i).s_n_ratio;\n currTrack.target_level = message.track_data.get_Item((uint)i).target_level;\n\n currTrack.timeStamp.time.hours = message.track_data.get_Item((uint)i).time_reference.time.hours;\n currTrack.timeStamp.time.minutes = message.track_data.get_Item((uint)i).time_reference.time.minutes;\n currTrack.timeStamp.time.seconds = message.track_data.get_Item((uint)i).time_reference.time.seconds;\n currTrack.timeStamp.time.c_seconds = message.track_data.get_Item((uint)i).time_reference.time.c_seconds;\n\n currTrack.timeStamp.date.day = message.track_data.get_Item((uint)i).time_reference.date.day;\n currTrack.timeStamp.date.month = message.track_data.get_Item((uint)i).time_reference.date.month;\n currTrack.timeStamp.date.year = message.track_data.get_Item((uint)i).time_reference.date.year;\n\n currTrack.trackId = message.track_data.get_Item((uint)i).track_id;\n\n newDataClass.systemTracks.Add(currTrack);\n }\n\n return newDataClass;\n\n }\n }\n\n\n class SubscriberProgram\n {\n static void Main(string[] args)\n {\n LucidDreamSystemClient mySystemClient = new LucidDreamSystemClient();\n mySystemClient.Init();\n\t\t\tmySystemClient.EnableAll();\n\t\t\tmySystemClient.Publish();\n \n Console.WriteLine(\"Shutting Down...\");\n mySystemClient.Shutdown();\n }\n }\n}\n" }, { "alpha_fraction": 0.5577439665794373, "alphanum_fraction": 0.5684870481491089, "avg_line_length": 30.02777862548828, "blob_id": "9bad89858af6c4a2376ebb1d589ef3b34957746c", "content_id": "fb58f823cfe28d7aa84094a112af71c24649abc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2247, "license_type": "no_license", "max_line_length": 107, "num_lines": 72, "path": "/TrackBeamParser/TrackBeamParser/Program.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.IO;\nusing System.Linq;\nusing 
System.Collections.Generic;\nusing System.Collections.Concurrent;\nusing System.Threading;\nusing System.Diagnostics;\n\nnamespace TrackBeamParser\n{\n class Program\n {\n static int i = 0;\n static byte[] FileBytes = File.ReadAllBytes(@\"C:\\Users\\96ron\\Desktop\\ื”ืืงื•ื™ืง ืจืื’ื‘\\CAS_HAKATON.rec\");\n static MicroLibrary.MicroTimer AudioMicroTimer;\n static DateTime timestart;\n\n static void Main(string[] args)\n {\n Console.WriteLine(\"TrackBeamParser service\");\n\n Thread thread = new Thread(() =>\n {\n TracksDataReceiver.StartListening((trackData) =>\n {\n BeamMaker.onReceiveTracks(trackData);\n });\n });\n thread.Start();\n\n timestart = DateTime.Now;\n\n AudioMicroTimer = new MicroLibrary.MicroTimer();\n AudioMicroTimer.MicroTimerElapsed +=\n new MicroLibrary.MicroTimer.MicroTimerElapsedEventHandler(OnTimedEventAudio);\n\n AudioMicroTimer.Interval = 100; // 1000ยตs = 1ms\n AudioMicroTimer.Enabled = true; // Start timer\n }\n\n\n private static void OnTimedEventAudio(object sender,\n MicroLibrary.MicroTimerEventArgs timerEventArgs)\n {\n if (i >= FileBytes.Length)\n {\n Console.WriteLine(\"***** FINISHED *****\");\n var TotalSeconds = (DateTime.Now - timestart).TotalSeconds;\n Console.WriteLine($\"TotalSeconds: {TotalSeconds}\");\n AudioMicroTimer.Enabled = false;\n return;\n }\n\n byte[] part = new byte[1400];\n Buffer.BlockCopy(FileBytes, i, part, 0, 1400);\n CASSubSegment newCAS = new CASSubSegment(part);\n CASSegmentManger.BufferManger(newCAS);\n i += 1400;\n }\n\n private static void NOP(double durationSeconds)\n {\n var durationTicks = Math.Round(durationSeconds * Stopwatch.Frequency);\n var sw = Stopwatch.StartNew();\n\n while (sw.ElapsedTicks < durationTicks)\n {\n\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.716312050819397, "alphanum_fraction": 0.716312050819397, "avg_line_length": 13.100000381469727, "blob_id": "a896df4dcba1b0ae6840a1c58965d45a0520ec41", "content_id": "8ddd32001a5699bea0409879ee1a863293bb886d", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 143, "license_type": "no_license", "max_line_length": 33, "num_lines": 10, "path": "/lucidDBManager/lucidDBManager/Data/EMCSData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace lucidDBManager.Data\n{\n public class EMCSData\n {\n }\n}\n" }, { "alpha_fraction": 0.5347721576690674, "alphanum_fraction": 0.5839328765869141, "avg_line_length": 19.850000381469727, "blob_id": "b2dc0e0f9efade220ecd549f025f57e149c05687", "content_id": "665643789a1140d295e615b19211fe73b4cf2143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 834, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/UI/AngularDreamUI/src/app/chart/chart.component.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\n\n@Component({\n selector: 'app-chart',\n templateUrl: './chart.component.html',\n styleUrls: ['./chart.component.css']\n})\nexport class ChartComponent implements OnInit {\n\n constructor() { }\n\n chartOptions = {\n responsive: true\n };\n chartData = [\n { data: [50, 90, 30, 55, 66, 88, 99, 100], label: 'Baruch' }\n ];\n\n chartLabels = ['0', '10', '20', '30', '40', '50', '60', '70'];\n\n onChartClick(event) {\n console.log(event);\n }\n\n newDataPoint(dataArr = [100, 100, 100], label) {\n\n this.chartData.forEach((dataset, index) => {\n this.chartData[index] = Object.assign({}, this.chartData[index], {\n data: [...this.chartData[index].data, dataArr[index]]\n });\n });\n \n this.chartLabels = [...this.chartLabels, label];\n \n }\n\n ngOnInit() {\n }\n\n}\n" }, { "alpha_fraction": 0.7038216590881348, "alphanum_fraction": 0.7038216590881348, "avg_line_length": 17.47058868408203, "blob_id": "0580fbfb9dc6e15d1e68f93d975c5435ff4c5a49", "content_id": 
"f25cbef9a8f58911e5f782bd705142bd0fa56ba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 316, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/Simulator/NavMessage/src/NavRabbitMQ.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing GlobalResources;\n\nnamespace NavMessage\n{\n public class NavRabbitMQ : RabbitMQServer\n {\n public NavRabbitMQ(string exchangeName) : base(exchangeName)\n {\n\n }\n }\n}\n" }, { "alpha_fraction": 0.6935545206069946, "alphanum_fraction": 0.6958057880401611, "avg_line_length": 61.36069107055664, "blob_id": "4e04e19d419bbad01133f8f7913c12addf8464e2", "content_id": "11435d8376b1a4a71e490afc7f9f03d32209ac67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 28873, "license_type": "no_license", "max_line_length": 320, "num_lines": 463, "path": "/lucidDBManager/LucidDream_OwnBoat_Microservice/src/LucidDream_OwnBoat_Managed.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n// Generated at : 31/10/2019 15:16:17 ,in TIK46593 PC ,by liran harari\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing LucidDreamContractManager_Managed;\nusing DDS_ManagedShell;\nusing LucidDream_DataTypesManaged;\nusing LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_nav_data;\nusing LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_bdt_track_data;\nusing 
LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data;\nusing LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data;\nusing Newtonsoft.Json;\n\nnamespace LucidDreamSystem\n{\n class LucidDreamSystemClient\n {\n \n #region Data members\n\t\t// contarct file path\n static string contractPath = \"../config/LucidDream_ContractManagerSystem_Contract.xml\";\n\n //contract Manager\n private LucidDreamContractManager m_LucidDreamContractManager;\n\n #region DataWriters\n private LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataWriter m_OwnBoatWriter;\n #endregion\n\n #region DataReaders\n private LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataReader m_OwnBoatReader;\n #endregion\n #endregion\n\n #region Properties\n\n public RabbitMQSender rabbit;\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataWriter OwnBoatWriter\n {\n get { return m_OwnBoatWriter; }\n set { m_OwnBoatWriter = value; }\n }\n\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataReader OwnBoatReader\n {\n get { return m_OwnBoatReader; }\n set { m_OwnBoatReader = value; }\n }\n\n\n #endregion\n\n public void SetupDataWriters()\n\t\t{\n OwnBoatWriter = m_LucidDreamContractManager.Get_LucidDreamParticipant_OwnBoatWriter();\n }\n\n public void SetupDataReaders()\n\t\t{\n OwnBoatReader = m_LucidDreamContractManager.Get_LucidDreamParticipant_OwnBoatReader();\n }\n\n public void RegistrationToEvents()\n\t\t{\n\n\n #region Register OwnBoatWriter events\n OwnBoatWriter.OnLivelinessLost += new WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n OwnBoatWriter.OnOfferedDeadlineMissed += new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n OwnBoatWriter.OnOfferedIncompatibleQOS += new 
OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n OwnBoatWriter.OnPublicationMatched += new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n #endregion\n\n #region Register OwnBoatReader events\n OwnBoatReader.OnSampleArrived += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeSampleArrivedHandler(OwnBoatReaderOnSampleArrived);\n OwnBoatReader.OnInstanceNotAliveNoWriters += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeInstanceNotAliveNoWritersHandler(OwnBoatReaderOnInstanceNotAliveNoWriters);\n OwnBoatReader.OnLivelinessGained += new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n OwnBoatReader.OnLivelinessLost += new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n OwnBoatReader.OnRequestedDeadlineMissed += new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n OwnBoatReader.OnRequestedIncompatibleQos += new DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n OwnBoatReader.OnSampleLost += new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n OwnBoatReader.OnSampleRejected += new DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n OwnBoatReader.OnSubscriptionMatched += new DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n #endregion\n\n\n }\n\n public void Init()\n {\n m_LucidDreamContractManager = new LucidDreamContractManager();\n rabbit = new RabbitMQSender();\n \n m_LucidDreamContractManager.LoadFromFile(contractPath);\n m_LucidDreamContractManager.VivifyAll();\n SetupDataWriters();\n SetupDataReaders();\n RegistrationToEvents();\n }\n \n public void EnableAll()\n {\n\t\t\tm_LucidDreamContractManager.EnableAll();\n }\n \n public void Publish()\n {\n 
LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_type idl_idde_itfmod_to_3pa_own_boat_data_idde_itfmod_to_3pa_own_boat_data_type = new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_type();\n\n\n while(true)\n {\n System.Threading.Thread.Sleep(1000);\n }\n }\n\n public void Shutdown()\n {\n\n\n #region Unregister OwnBoatWriter events\n OwnBoatWriter.OnLivelinessLost -= new WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n OwnBoatWriter.OnOfferedDeadlineMissed -= new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n OwnBoatWriter.OnOfferedIncompatibleQOS -= new OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n OwnBoatWriter.OnPublicationMatched -= new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n #endregion\n\n #region Unregister OwnBoatReader events\n OwnBoatReader.OnSampleArrived -= new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeSampleArrivedHandler(OwnBoatReaderOnSampleArrived);\n OwnBoatReader.OnInstanceNotAliveNoWriters -= new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeInstanceNotAliveNoWritersHandler(OwnBoatReaderOnInstanceNotAliveNoWriters);\n OwnBoatReader.OnLivelinessGained -= new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n OwnBoatReader.OnLivelinessLost -= new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n OwnBoatReader.OnRequestedDeadlineMissed -= new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n OwnBoatReader.OnRequestedIncompatibleQos -= new DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n OwnBoatReader.OnSampleLost -= new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n OwnBoatReader.OnSampleRejected -= new 
DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n OwnBoatReader.OnSubscriptionMatched -= new DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n #endregion\n\n m_LucidDreamContractManager.SubdueAll();\n m_LucidDreamContractManager.UnLoad();\n }\n \n #region Events\n #region General DataWriters Event\n /*! This event is raised when the DataWriter failed to write data\n within the time period set in its DeadlineQosPolicy */\n public void OnOfferedDeadlineMissedHendler(DataWriter dataWriter , OfferedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" missed its offered deadline \" + status.TotalCountChanged + \" times.\");\n }\n\n /*! This event is raised when the DataWriter failed to signal its liveliness\n within the time specified by the LivelinessQosPolicy */\n public void OnLivelinessLostHendler(DataWriter dataWriter, LivelinessLostStatus status)\n {\n Console.WriteLine(\"Liveliness Lost on DataWriter: \" + dataWriter.Name);\n }\n\n /*! This event is raised when the DataWriter discovered a DataReader for\n the same topic, but the DataReader had requested Qos settings incompatible \n with this DataWriter's offered Qos */\n public void OnOfferedIncompatibleQOSHendler(DataWriter dataWriter,OfferedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" QoS policies, that were incompatible with remote DataReader. Check contracts on both sides.\");\n }\n \n\t\t/*! 
This event is raised when the DataWriter discovered a matching DataReader */\n public void OnPublicationMatchedHendler(DataWriter dataWriter, PublicationMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" Publication matched\");\n }\n else\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" DataReader lost\");\n }\n }\n #endregion\n\n #region General DataReaders Event\n /*! This event is raised when the DataReader did not receive\n\t\ta new sample for an data-instance within the time period \n\t\tset in the DataReader's DeadlineQosPolicy */\n public void OnRequestedDeadlineMissedEventHendler(DataReader dataReader, RequestedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"requested deadline missed on DataReader \" + dataReader.Name +\n \"requested deadline that was not respected by DataWriter \" +\n status.TotalCountChanged);\n }\n\n\t\t/*! This event is raised when the number of matched DataWriters that are \n currently alive changed from any number to 0 */\n public void OnLivelinessLostEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"Liveliness lost of \" + dataReader.Name);\n }\n \n\t\t/*! This event is raised when the number of matched DataWriters that are \n currently alive increased from 0 to 1 */\n public void OnLivelinessGainedEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"a new Liveliness gained of \" + dataReader.Name);\n }\n\n\t\t/*! 
This event is raised when the DataReader discovered a dataWriter for \n the same Topic, but that DataReader has requested Qos settings incompatible\n with this DataWriter's offered Qos */\n public void OnRequestedIncompatibleQosEventHendler(DataReader dataReader, RequestedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"Incompatible Qos on topic \" + dataReader.Name +\n\t\t\t \" QoS policies, that were inconsistent with DataWriter. Check contracts on both sides.\");\n }\n\n /*! This event is raised when one or more samples received from the DataWriter\n have been dropped by the DataReader */\n public void OnSampleRejectedEventHendler(DataReader dataReader, SampleRejectedStatus status)\n {\n\t\t\tConsole.WriteLine(\" sample rejected in DataReader: \" + dataReader.Name + \"the reason is: \" +\n \"samples were rejected. Usually this happens when DataReader's memory resources are exhausted.\");\n }\n \n\t\t/*! This event is raised when one or more samples received from the DataWriter\n have failed to be received */\n public void OnSampleLostEventHendler(DataReader dataReader, SampleLostStatus status)\n {\n\t\t\tConsole.WriteLine(\"\\n\" + status.TotalCountChanged + \" Sample lost on DataReader: \" + dataReader.Name +\n\t\t\t \"\\n until now \" + status.TotalCount + \" samples lost\" + \n\t\t\t \"\\n samples were lost. Usually this happens when DataWriter writes faster than DataReader reads.\");\n\n }\n \n\t\t/*! 
This event is raised when the DataReader discovered a matching DataWriter */\n public void OnSubscriptionMatchedEventHendler(DataReader dataReader, SubscriptionMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" subscription matched\");\n }\n else\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" an existing matched DataWriter has been deleted\");\n }\n }\n #endregion\n\n #region Specific DataReaders Events\n #region OwnBoatReader Events\n public void OwnBoatReaderOnSampleArrived(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_type dataType, SampleInfo info, ValidityStatus validity)\n {\n OwnBoat_OriginalMessage converted_data = ConvertData(dataType);\n string data = JsonConvert.SerializeObject(converted_data);\n rabbit.SendData(data);\n Console.WriteLine(\"a new sample of \\\"idde_itfmod_to_3pa_own_boat_data_type\\\" has arrived\");\n }\n\n\n public void OwnBoatReaderOnInstanceNotAliveNoWriters(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_own_boat_data.idde_itfmod_to_3pa_own_boat_data_type dataType, SampleInfo info)\n {\n Console.WriteLine(\"an instance of \\\"idde_itfmod_to_3pa_own_boat_data_type\\\" has lost all its writers\");\n }\n\n #endregion\n #endregion\n #endregion\n\n #region ConvertClass\n public OwnBoat_OriginalMessage ConvertData(idde_itfmod_to_3pa_own_boat_data_type message)\n {\n OwnBoat_OriginalMessage newDataClass = new OwnBoat_OriginalMessage();\n\n newDataClass.idlHeader.message_state = message.idl_header.message_state;\n newDataClass.idlHeader.message_source = 
message.idl_header.message_source;\n newDataClass.idlHeader.number_of_bytes = message.idl_header.number_of_bytes;\n newDataClass.idlHeader.compile_time_of_message.time.hours = message.idl_header.compile_time_of_message.time.hours;\n newDataClass.idlHeader.compile_time_of_message.time.minutes = message.idl_header.compile_time_of_message.time.minutes;\n newDataClass.idlHeader.compile_time_of_message.time.seconds = message.idl_header.compile_time_of_message.time.seconds;\n newDataClass.idlHeader.compile_time_of_message.time.c_seconds = message.idl_header.compile_time_of_message.time.c_seconds;\n newDataClass.idlHeader.compile_time_of_message.date.year = message.idl_header.compile_time_of_message.date.year;\n newDataClass.idlHeader.compile_time_of_message.date.month = message.idl_header.compile_time_of_message.date.month;\n newDataClass.idlHeader.compile_time_of_message.date.day = message.idl_header.compile_time_of_message.date.day;\n\n newDataClass.systemTime.is_current = message.system_time.is_current;\n newDataClass.systemTime.sensor = message.system_time.sensor;\n newDataClass.systemTime.time.valid = message.system_time.time.valid;\n newDataClass.systemTime.time.value.time.hours = message.system_time.time.value.time.hours;\n newDataClass.systemTime.time.value.time.minutes = message.system_time.time.value.time.minutes;\n newDataClass.systemTime.time.value.time.seconds = message.system_time.time.value.time.seconds;\n newDataClass.systemTime.time.value.time.c_seconds = message.system_time.time.value.time.c_seconds;\n newDataClass.systemTime.time.value.date.year = message.system_time.time.value.date.year;\n newDataClass.systemTime.time.value.date.month = message.system_time.time.value.date.month;\n newDataClass.systemTime.time.value.date.day = message.system_time.time.value.date.day;\n\n newDataClass.timezone.data.valid = message.timezone.data.valid;\n newDataClass.timezone.data.value = message.timezone.data.value;\n newDataClass.timezone.is_current = 
message.timezone.is_current;\n newDataClass.timezone.sensor = message.timezone.sensor;\n newDataClass.timezone.time.valid = message.timezone.time.valid;\n newDataClass.timezone.time.value.time.hours = message.timezone.time.value.time.hours;\n newDataClass.timezone.time.value.time.minutes = message.timezone.time.value.time.minutes;\n newDataClass.timezone.time.value.time.seconds = message.timezone.time.value.time.seconds;\n newDataClass.timezone.time.value.time.c_seconds = message.timezone.time.value.time.c_seconds;\n newDataClass.timezone.time.value.date.year = message.timezone.time.value.date.year;\n newDataClass.timezone.time.value.date.month = message.timezone.time.value.date.month;\n newDataClass.timezone.time.value.date.day = message.timezone.time.value.date.day;\n\n newDataClass.heading.data.valid = message.heading.data.valid;\n newDataClass.heading.data.value = message.heading.data.value;\n newDataClass.heading.is_current = message.heading.is_current;\n newDataClass.heading.sensor = message.heading.sensor;\n newDataClass.heading.time.valid = message.heading.time.valid;\n newDataClass.heading.time.value.time.hours = message.heading.time.value.time.hours;\n newDataClass.heading.time.value.time.minutes = message.heading.time.value.time.minutes;\n newDataClass.heading.time.value.time.seconds = message.heading.time.value.time.seconds;\n newDataClass.heading.time.value.time.c_seconds = message.heading.time.value.time.c_seconds;\n newDataClass.heading.time.value.date.year = message.heading.time.value.date.year;\n newDataClass.heading.time.value.date.month = message.heading.time.value.date.month;\n newDataClass.heading.time.value.date.day = message.heading.time.value.date.day;\n\n newDataClass.heading_rate.data.valid = message.heading_rate.data.valid;\n newDataClass.heading_rate.data.value = message.heading_rate.data.value;\n newDataClass.heading_rate.is_current = message.heading_rate.is_current;\n newDataClass.heading_rate.sensor = message.heading_rate.sensor;\n 
newDataClass.heading_rate.time.valid = message.heading_rate.time.valid;\n newDataClass.heading_rate.time.value.time.hours = message.heading_rate.time.value.time.hours;\n newDataClass.heading_rate.time.value.time.minutes = message.heading_rate.time.value.time.minutes;\n newDataClass.heading_rate.time.value.time.seconds = message.heading_rate.time.value.time.seconds;\n newDataClass.heading_rate.time.value.time.c_seconds = message.heading_rate.time.value.time.c_seconds;\n newDataClass.heading_rate.time.value.date.year = message.heading_rate.time.value.date.year;\n newDataClass.heading_rate.time.value.date.month = message.heading_rate.time.value.date.month;\n newDataClass.heading_rate.time.value.date.day = message.heading_rate.time.value.date.day;\n\n newDataClass.roll.data.valid = message.roll.data.valid;\n newDataClass.roll.data.value = message.roll.data.value;\n newDataClass.roll.is_current = message.roll.is_current;\n newDataClass.roll.sensor = message.roll.sensor;\n newDataClass.roll.time.valid = message.roll.time.valid;\n newDataClass.roll.time.value.time.hours = message.roll.time.value.time.hours;\n newDataClass.roll.time.value.time.minutes = message.roll.time.value.time.minutes;\n newDataClass.roll.time.value.time.seconds = message.roll.time.value.time.seconds;\n newDataClass.roll.time.value.time.c_seconds = message.roll.time.value.time.c_seconds;\n newDataClass.roll.time.value.date.year = message.roll.time.value.date.year;\n newDataClass.roll.time.value.date.month = message.roll.time.value.date.month;\n newDataClass.roll.time.value.date.day = message.roll.time.value.date.day;\n\n newDataClass.roll_rate.data.valid = message.roll_rate.data.valid;\n newDataClass.roll_rate.data.value = message.roll_rate.data.value;\n newDataClass.roll_rate.is_current = message.roll_rate.is_current;\n newDataClass.roll_rate.sensor = message.roll_rate.sensor;\n newDataClass.roll_rate.time.valid = message.roll_rate.time.valid;\n newDataClass.roll_rate.time.value.time.hours = 
message.roll_rate.time.value.time.hours;\n newDataClass.roll_rate.time.value.time.minutes = message.roll_rate.time.value.time.minutes;\n newDataClass.roll_rate.time.value.time.seconds = message.roll_rate.time.value.time.seconds;\n newDataClass.roll_rate.time.value.time.c_seconds = message.roll_rate.time.value.time.c_seconds;\n newDataClass.roll_rate.time.value.date.year = message.roll_rate.time.value.date.year;\n newDataClass.roll_rate.time.value.date.month = message.roll_rate.time.value.date.month;\n newDataClass.roll_rate.time.value.date.day = message.roll_rate.time.value.date.day;\n\n newDataClass.pitch.data.valid = message.pitch.data.valid;\n newDataClass.pitch.data.value = message.pitch.data.value;\n newDataClass.pitch.is_current = message.pitch.is_current;\n newDataClass.pitch.sensor = message.pitch.sensor;\n newDataClass.pitch.time.valid = message.pitch.time.valid;\n newDataClass.pitch.time.value.time.hours = message.pitch.time.value.time.hours;\n newDataClass.pitch.time.value.time.minutes = message.pitch.time.value.time.minutes;\n newDataClass.pitch.time.value.time.seconds = message.pitch.time.value.time.seconds;\n newDataClass.pitch.time.value.time.c_seconds = message.pitch.time.value.time.c_seconds;\n newDataClass.pitch.time.value.date.year = message.pitch.time.value.date.year;\n newDataClass.pitch.time.value.date.month = message.pitch.time.value.date.month;\n newDataClass.pitch.time.value.date.day = message.pitch.time.value.date.day;\n\n newDataClass.pitch_rate.data.valid = message.pitch_rate.data.valid;\n newDataClass.pitch_rate.data.value = message.pitch_rate.data.value;\n newDataClass.pitch_rate.is_current = message.pitch_rate.is_current;\n newDataClass.pitch_rate.sensor = message.pitch_rate.sensor;\n newDataClass.pitch_rate.time.valid = message.pitch_rate.time.valid;\n newDataClass.pitch_rate.time.value.time.hours = message.pitch_rate.time.value.time.hours;\n newDataClass.pitch_rate.time.value.time.minutes = 
message.pitch_rate.time.value.time.minutes;\n newDataClass.pitch_rate.time.value.time.seconds = message.pitch_rate.time.value.time.seconds;\n newDataClass.pitch_rate.time.value.time.c_seconds = message.pitch_rate.time.value.time.c_seconds;\n newDataClass.pitch_rate.time.value.date.year = message.pitch_rate.time.value.date.year;\n newDataClass.pitch_rate.time.value.date.month = message.pitch_rate.time.value.date.month;\n newDataClass.pitch_rate.time.value.date.day = message.pitch_rate.time.value.date.day;\n\n newDataClass.heave.data.valid = message.heave.data.valid;\n newDataClass.heave.data.value = message.heave.data.value;\n newDataClass.heave.is_current = message.heave.is_current;\n newDataClass.heave.sensor = message.heave.sensor;\n newDataClass.heave.time.valid = message.heave.time.valid;\n newDataClass.heave.time.value.time.hours = message.heave.time.value.time.hours;\n newDataClass.heave.time.value.time.minutes = message.heave.time.value.time.minutes;\n newDataClass.heave.time.value.time.seconds = message.heave.time.value.time.seconds;\n newDataClass.heave.time.value.time.c_seconds = message.heave.time.value.time.c_seconds;\n newDataClass.heave.time.value.date.year = message.heave.time.value.date.year;\n newDataClass.heave.time.value.date.month = message.heave.time.value.date.month;\n newDataClass.heave.time.value.date.day = message.heave.time.value.date.day;\n\n newDataClass.heave.data.valid = message.heave.data.valid;\n newDataClass.heave.data.value = message.heave.data.value;\n newDataClass.heave.is_current = message.heave.is_current;\n newDataClass.heave.sensor = message.heave.sensor;\n newDataClass.heave.time.valid = message.heave.time.valid;\n newDataClass.heave.time.value.time.hours = message.heave.time.value.time.hours;\n newDataClass.heave.time.value.time.minutes = message.heave.time.value.time.minutes;\n newDataClass.heave.time.value.time.seconds = message.heave.time.value.time.seconds;\n newDataClass.heave.time.value.time.c_seconds = 
message.heave.time.value.time.c_seconds;\n newDataClass.heave.time.value.date.year = message.heave.time.value.date.year;\n newDataClass.heave.time.value.date.month = message.heave.time.value.date.month;\n newDataClass.heave.time.value.date.day = message.heave.time.value.date.day;\n\n newDataClass.heave_rate.data.valid = message.heave_rate.data.valid;\n newDataClass.heave_rate.data.value = message.heave_rate.data.value;\n newDataClass.heave_rate.is_current = message.heave_rate.is_current;\n newDataClass.heave_rate.sensor = message.heave_rate.sensor;\n newDataClass.heave_rate.time.valid = message.heave_rate.time.valid;\n newDataClass.heave_rate.time.value.time.hours = message.heave_rate.time.value.time.hours;\n newDataClass.heave_rate.time.value.time.minutes = message.heave_rate.time.value.time.minutes;\n newDataClass.heave_rate.time.value.time.seconds = message.heave_rate.time.value.time.seconds;\n newDataClass.heave_rate.time.value.time.c_seconds = message.heave_rate.time.value.time.c_seconds;\n newDataClass.heave_rate.time.value.date.year = message.heave_rate.time.value.date.year;\n newDataClass.heave_rate.time.value.date.month = message.heave_rate.time.value.date.month;\n newDataClass.heave_rate.time.value.date.day = message.heave_rate.time.value.date.day;\n\n newDataClass.course_over_ground.data.valid = message.course_over_ground.data.valid;\n newDataClass.course_over_ground.data.value = message.course_over_ground.data.value;\n newDataClass.course_over_ground.is_current = message.course_over_ground.is_current;\n newDataClass.course_over_ground.sensor = message.course_over_ground.sensor;\n newDataClass.course_over_ground.time.valid = message.course_over_ground.time.valid;\n newDataClass.course_over_ground.time.value.time.hours = message.course_over_ground.time.value.time.hours;\n newDataClass.course_over_ground.time.value.time.minutes = message.course_over_ground.time.value.time.minutes;\n newDataClass.course_over_ground.time.value.time.seconds = 
message.course_over_ground.time.value.time.seconds;\n newDataClass.course_over_ground.time.value.time.c_seconds = message.course_over_ground.time.value.time.c_seconds;\n newDataClass.course_over_ground.time.value.date.year = message.course_over_ground.time.value.date.year;\n newDataClass.course_over_ground.time.value.date.month = message.course_over_ground.time.value.date.month;\n newDataClass.course_over_ground.time.value.date.day = message.course_over_ground.time.value.date.day;\n\n\n return newDataClass;\n\n }\n #endregion\n }\n\n class SubscriberProgram\n {\n static void Main(string[] args)\n {\n LucidDreamSystemClient mySystemClient = new LucidDreamSystemClient();\n mySystemClient.Init();\n\t\t\tmySystemClient.EnableAll();\n\t\t\tmySystemClient.Publish();\n \n Console.WriteLine(\"Shutting Down...\");\n mySystemClient.Shutdown();\n }\n }\n}\n" }, { "alpha_fraction": 0.4610079526901245, "alphanum_fraction": 0.4710875451564789, "avg_line_length": 24.472972869873047, "blob_id": "981ae2b299fb433e61deed28c94bca5c1c20944a", "content_id": "be769322676a57474a135ebff310fee7850ba10d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 89, "num_lines": 74, "path": "/TrackBeamParser/TrackBeamParser/BeamsBuffer.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public static class BeamsBuffer\n {\n const int beamsNumber = 192;\n const int maxBufferSize = 1000000;\n static volatile int indexOfEndInBuffers;\n\n\n static volatile byte[][] Beams;\n public static Object Locker = new Object();\n\n public static double Heading { get; internal set; }\n\n static BeamsBuffer()\n {\n cleanBeams();\n }\n\n public static void WriteBeamsFromDictionary(byte[][] beamsValues)\n {\n lock (Locker)\n {\n int j = 0;\n int beamLength = beamsValues[0].Length;\n\n 
foreach (var beam in beamsValues)\n {\n Buffer.BlockCopy(beam, 0, Beams[j], indexOfEndInBuffers, beamLength);\n j++;\n }\n\n indexOfEndInBuffers += beamLength;\n }\n }\n\n public static byte[][] getBeamsAndFlush()\n {\n lock (Locker)\n {\n if (indexOfEndInBuffers == 0)\n return null;\n\n byte[][] beamArray = new byte[beamsNumber][];\n int j = 0;\n\n foreach (var beam in Beams)\n {\n beamArray[j] = new byte[indexOfEndInBuffers];\n Buffer.BlockCopy(beam, 0, beamArray[j], 0, indexOfEndInBuffers);\n j++;\n }\n\n cleanBeams();\n return beamArray;\n }\n }\n\n private static void cleanBeams()\n {\n indexOfEndInBuffers = 0;\n Beams = new byte[beamsNumber][];\n\n for (int i = 0; i < Beams.Length; i++)\n {\n Beams[i] = new byte[maxBufferSize];\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.6746841669082642, "alphanum_fraction": 0.6768707633018494, "avg_line_length": 27.386207580566406, "blob_id": "4ad93bdb0922e5a15939a789c9d2bf75d6a1b9e3", "content_id": "4c207570ec27d994afce7bfe5f15ba86b3d26ab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4116, "license_type": "no_license", "max_line_length": 91, "num_lines": 145, "path": "/UI/AngularDreamUI/src/app/audio-manager/audio-manager.component.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { Component, OnInit, ElementRef, ViewChild, AfterViewInit } from '@angular/core';\nimport { WebSocketService } from \".././websocket.service\";\n\n@Component({\n selector: 'app-audio-manager',\n templateUrl: './audio-manager.component.html',\n styleUrls: ['./audio-manager.component.css']\n})\nexport class AudioManagerComponent implements OnInit, AfterViewInit {\n\n @ViewChild('seekslider', { static: false }) seeksliderRef: ElementRef;\n\n // Properties of the class\n hidePlayButton: boolean;\n startRecordingButtonEnabled: boolean;\n myAudio: HTMLAudioElement;\n playRecordInteravlId: any;\n playRecordingButtonEnabled: boolean;\n blockSeek: boolean;\n 
audioWasPaused: boolean;\n sliderCurrentValue: string;\n\n constructor(private webSocketService: WebSocketService) {\n this.hidePlayButton = false;\n this.startRecordingButtonEnabled = true;\n this.playRecordingButtonEnabled = false;\n this.blockSeek = false;\n this.audioWasPaused = true;\n this.sliderCurrentValue = \"0\";\n this.myAudio = new Audio();\n }\n\n getAudioSourceFromServer() {\n this.myAudio.src = 'http://localhost:3000/wav';\n };\n\n // Create event listeners for the audio\n initAudioPlayer() {\n\n // Listener for event: 'ended'\n this.myAudio.addEventListener('ended', () => {\n this.hidePlayButton = false;\n this.seeksliderRef.nativeElement.value = 0;\n clearInterval(this.playRecordInteravlId);\n });\n\n // Listener for event: 'play'\n this.myAudio.addEventListener('play', () => {\n this.hidePlayButton = true;\n });\n\n // Listener for event: 'pause'\n this.myAudio.addEventListener('pause', () => {\n this.hidePlayButton = false;\n });\n\n // Listener for event: 'loadedmetadata'\n this.myAudio.addEventListener('loadedmetadata', () => {\n console.log(\"wav duration: \" + this.myAudio.duration);\n this.seeksliderRef.nativeElement.max = this.myAudio.duration;\n\n // Button control\n this.playRecordingButtonEnabled = true;\n });\n };\n\n playRecordedAudio() {\n this.myAudio.play();\n };\n\n pauseRecordedAudio() {\n this.myAudio.pause();\n };\n\n startRecording() {\n // Button control\n this.startRecordingButtonEnabled = false;\n\n // Emit signal to server\n this.webSocketService.emit('server_startRecording', 'Record');\n };\n\n stopRecording() {\n // Button control\n this.startRecordingButtonEnabled = true;\n\n // Emit signal to server\n this.webSocketService.emit('server_stopRecording', 'Stop');\n\n // get the audio we recorded\n this.getAudioSourceFromServer();\n };\n\n \n onSeek = (event) => {\n console.log(\"onSeek val is :\" + event.target.value );\n if (!this.blockSeek) {\n this.blockSeek = true;\n this.audioWasPaused = this.myAudio.paused;\n 
this.myAudio.pause();\n }\n console.log(\"onSeek: slider seeked value: \" + this.sliderCurrentValue );\n console.log(\"onSeek: currentTime-1: \" + this.myAudio.currentTime );\n this.myAudio.currentTime = 1;\n console.log(\"onSeek: currentTime-2: \" + this.myAudio.currentTime );\n }\n\n onSeekRelease = () => {\n console.log(\"onSeekRelease\");\n console.log(\"onSeekRelease: slider seeked value: \" + this.sliderCurrentValue );\n console.log(\"onSeekRelease: slider current time: \" + this.myAudio.currentTime );\n if (!this.audioWasPaused) {\n this.myAudio.play();\n }\n this.blockSeek = false;\n }\n\n onTimeupdate = () => {\n console.log(\"onTimeupdate\");\n if (!this.blockSeek) {\n this.sliderCurrentValue = this.myAudio.currentTime.toString();\n }\n }\n\n ngOnInit() {\n // Listen to events from server\n this.webSocketService.listen('client_recordingStarted').subscribe((data) => {\n console.log(data);\n });\n\n this.webSocketService.listen('client_recordingStopped').subscribe((data) => {\n console.log(data);\n });\n\n }\n\n ngAfterViewInit() {\n\n this.myAudio.addEventListener('timeupdate', this.onTimeupdate, false);\n this.seeksliderRef.nativeElement.addEventListener('input', this.onSeek, false);\n this.seeksliderRef.nativeElement.addEventListener('change', this.onSeekRelease, false);\n\n this.initAudioPlayer();\n }\n}\n" }, { "alpha_fraction": 0.5657092928886414, "alphanum_fraction": 0.5657092928886414, "avg_line_length": 30.054054260253906, "blob_id": "e2f5cd702a036916f2cb23e9fe20e497505ec27d", "content_id": "925606aa1134707d6c02b0d74e2998a81e2d7bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 96, "num_lines": 37, "path": "/TrackBeamParser/TrackBeamParser/TrackBeamDataSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing System;\nusing System.Collections.Generic;\nusing 
System.Text;\n\nnamespace TrackBeamParser\n{\n public static class TrackBeamDataSender\n {\n static IModel beamTrackDataChannel;\n\n static TrackBeamDataSender()\n {\n IConnection connection = RabbitMQConnection.getConnection();\n beamTrackDataChannel = connection.CreateModel();\n\n beamTrackDataChannel.ExchangeDeclare(exchange: \"beamTrackData\",\n type: ExchangeType.Fanout);\n }\n\n public static void sendTrackBeamData(TrackBeamData trackBeamData)\n {\n byte[] body = Encoding.Default.GetBytes(JsonConvert.SerializeObject(trackBeamData));\n\n beamTrackDataChannel.BasicPublish(exchange: \"beamTrackData\",\n routingKey: \"\",\n basicProperties: null,\n body: body);\n }\n\n public static void dispose()\n {\n beamTrackDataChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.5596880912780762, "alphanum_fraction": 0.5602879524230957, "avg_line_length": 33.72916793823242, "blob_id": "4468619f88a64d01ba01d6d338213bc60a3013cb", "content_id": "fb2e7a9f1baf7ff4db5b298760048d9a01d9add5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1669, "license_type": "no_license", "max_line_length": 118, "num_lines": 48, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/TrackBeamDataReciever.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing Newtonsoft.Json;\nusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace BestTrackBeamSticher\n{\n public static class TrackBeamDataReciever\n {\n static IModel trackDataChannel;\n\n public static void StartListening(Action<TrackBeamData> funcThatWantTheData)\n {\n IConnection connection = RabbitMQConnection.getConnection();\n trackDataChannel = connection.CreateModel();\n\n trackDataChannel.ExchangeDeclare(exchange: \"beamTrackData\",\n type: ExchangeType.Fanout);\n\n trackDataChannel.QueueDeclare(queue: \"beamTrack\",\n durable: false,\n exclusive: false,\n autoDelete: 
true,\n arguments: null);\n\n trackDataChannel.QueueBind(queue: \"beamTrack\", exchange: \"beamTrackData\", routingKey: \"\");\n\n var consumer = new EventingBasicConsumer(trackDataChannel);\n consumer.Received += (model, ea) =>\n {\n byte[] body = ea.Body;\n TrackBeamData trackData = JsonConvert.DeserializeObject<TrackBeamData>(Encoding.UTF8.GetString(body));\n funcThatWantTheData(trackData);\n };\n\n trackDataChannel.BasicConsume(queue: \"beamTrack\",\n autoAck: true,\n consumer: consumer);\n }\n\n public static void stopListening()\n {\n trackDataChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.5769692063331604, "alphanum_fraction": 0.5793682336807251, "avg_line_length": 33.97901916503906, "blob_id": "d9996d95d19c6cbcce05c48d2050636d17ef3244", "content_id": "f48914d091226c9ef8dc888af9e18f2f0f2f0f3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5004, "license_type": "no_license", "max_line_length": 116, "num_lines": 143, "path": "/Simulator/MainSendWindow/MainWindow.xaml.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing BdtCasMessage;\nusing NavMessage;\nusing BeamBusCas;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.Windows;\nusing System.Windows.Controls;\nusing System.Windows.Data;\nusing System.Windows.Documents;\nusing System.Windows.Input;\nusing System.Windows.Media;\nusing System.Windows.Media.Imaging;\nusing System.Windows.Navigation;\nusing System.Windows.Shapes;\nusing MainSendWindow.Properties;\nusing System.Threading;\nusing WaterfallSimulator;\n\nnamespace MainSendWindow\n{\n /// <summary>\n /// Interaction logic for MainWindow.xaml\n /// </summary>\n public partial class MainWindow : Window\n {\n private NavRabbitMQ _navRabbitMQ;\n private BdtCasRabbitMQ _bdtCasRabbitMQ;\n private Manager manager;\n\n private bool _isSendingUAGMessages;\n private bool 
_isSendingBeamBusCasMessages;\n\n public MainWindow()\n {\n InitializeComponent();\n _navRabbitMQ = new NavRabbitMQ(Settings.Default.Nav_ExchangeName);\n _bdtCasRabbitMQ = new BdtCasRabbitMQ(Settings.Default.BdtCas_ExchangeName);\n _isSendingUAGMessages = false;\n manager = new Manager();\n }\n\n private void sendBeamBusCasMessages_Click(object sender, RoutedEventArgs e)\n {\n if (!_isSendingBeamBusCasMessages)\n {\n _isSendingBeamBusCasMessages = true;\n BeamBusCasSender.isSending = true;\n beamBusCasButton.Content = \"Stop Sending BeamBus Cas Messages\";\n Thread beamBusCasSenderThread = new Thread(delegate ()\n {\n BeamBusCasSender.SendMessage();\n });\n\n beamBusCasSenderThread.SetApartmentState(ApartmentState.STA); // needs to be STA or throws exception\n beamBusCasSenderThread.Start();\n }\n else\n {\n _isSendingBeamBusCasMessages = false;\n BeamBusCasSender.isSending = false;\n beamBusCasButton.Content = \"Start Sending BeamBus Cas Messages\";\n }\n\n }\n\n private void sendNavMessages_Click(object sender, RoutedEventArgs e)\n {\n\n OriginalNavMessage navObject = new OriginalNavMessage();\n Thread navSenderThread = new Thread(delegate ()\n {\n while (true)\n {\n _navRabbitMQ.SendMessage(navObject);\n Thread.Sleep(1000);\n }\n });\n navSenderThread.SetApartmentState(ApartmentState.STA); // needs to be STA or throws exception\n navSenderThread.Start();\n\n }\n\n private void sendAllMessages_Click(object sender, RoutedEventArgs e)\n {\n if (!_isSendingUAGMessages)\n {\n _isSendingUAGMessages = true;\n allUAGButton.Content = \"Stop Sending All UAG Messages\";\n OriginalBdtCasMessage bdtCasObject = new OriginalBdtCasMessage();\n OriginalNavMessage navObject = new OriginalNavMessage();\n Thread allUAGSenderThread = new Thread(delegate ()\n {\n while (_isSendingUAGMessages)\n {\n _bdtCasRabbitMQ.SendMessage(bdtCasObject);\n _navRabbitMQ.SendMessage(navObject);\n Thread.Sleep(1000);\n Console.WriteLine(\"Send Nav And BdtCas Message\");\n }\n });\n 
allUAGSenderThread.SetApartmentState(ApartmentState.STA); // needs to be STA or throws exception\n allUAGSenderThread.Start();\n }\n else\n {\n _isSendingUAGMessages = false;\n allUAGButton.Content = \"Start Sending All UAG Messages\";\n Console.WriteLine(\"Stop Send Messages\");\n }\n\n\n }\n\n private void sendBdtCasMessages_Click(object sender, RoutedEventArgs e)\n {\n OriginalBdtCasMessage bdtCasObject = new OriginalBdtCasMessage();\n Thread bdtCasSenderThread = new Thread(delegate ()\n {\n while (true)\n {\n _bdtCasRabbitMQ.SendMessage(bdtCasObject);\n Thread.Sleep(1000);\n }\n });\n\n bdtCasSenderThread.SetApartmentState(ApartmentState.STA); // needs to be STA or throws exception\n bdtCasSenderThread.Start();\n }\n\n private void sendWaterFall_Click(object sender, RoutedEventArgs e)\n {\n Thread waterFallSenderThread = new Thread(delegate ()\n {\n manager.Start();\n });\n waterFallSenderThread.SetApartmentState(ApartmentState.STA); // needs to be STA or throws exception\n waterFallSenderThread.Start();\n }\n }\n}\n" }, { "alpha_fraction": 0.5953795313835144, "alphanum_fraction": 0.6072607040405273, "avg_line_length": 31.934782028198242, "blob_id": "bd68bbcd309d9d9522eb09bd4e91fa2e18d14e22", "content_id": "5dff06cfe559f46e4f363eca92787a3cadac328e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 114, "num_lines": 46, "path": "/TrackBeamParser/TrackBeamParser/BeamMaker.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public static class BeamMaker\n {\n public static void onReceiveTracks(SystemTracks trackData)\n {\n double heading = BeamsBuffer.Heading;\n\n //retreive the actual beams from the BeamBuffer\n byte[][] beamArray = BeamsBuffer.getBeamsAndFlush();\n\n if (beamArray == null)\n return;\n\n foreach (var track in 
trackData.systemTracks)\n {\n TrackBeamData trackBeamData = CalcBeams(track.trackID, heading, track.relativeBearing, beamArray);\n TrackBeamDataSender.sendTrackBeamData(trackBeamData);\n }\n }\n\n public static TrackBeamData CalcBeams(long trackNum, double heading, double RB, byte[][] beamArray)\n {\n var trackBeamData = new TrackBeamData();\n trackBeamData.TrackNum = (int)trackNum;\n\n double trackDegree = (heading + RB) % 360;\n const double factor = 192.0 / 360.0;\n double beamNumber = trackDegree * factor;\n\n int beamNum1 = (int)(Math.Floor(beamNumber));\n int beamNum2 = (int)(Math.Ceiling(beamNumber)); \n double precentage = Math.Abs(beamNum2 - beamNumber);\n\n trackBeamData.Beam1 = beamArray[beamNum1];\n trackBeamData.Beam2 = beamArray[beamNum2];\n trackBeamData.Precentage = precentage;\n\n return trackBeamData;\n }\n }\n}\n" }, { "alpha_fraction": 0.5957019925117493, "alphanum_fraction": 0.6008595824241638, "avg_line_length": 33.2156867980957, "blob_id": "0cea43de9d21706480b0099662192541a350f747", "content_id": "c0a0ec53f8ee1fa52474acb0e97637d77070b7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3492, "license_type": "no_license", "max_line_length": 86, "num_lines": 102, "path": "/Simulator/BdtCasMessage/src/OriginalBdtCasMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing static GlobalResources.BasicData;\n\n\nnamespace BdtCasMessage\n{\n public class OriginalBdtCasMessage\n {\n public TimeType timeStamp;\n public List<TrackData> systemTracks;\n\n public OriginalBdtCasMessage()\n {\n GetGenerateObject();\n }\n\n public OriginalBdtCasMessage(TimeType timeStamp, List<TrackData> systemTracks)\n {\n this.timeStamp = new TimeType();\n this.timeStamp.c_seconds = timeStamp.c_seconds;\n this.timeStamp.seconds = timeStamp.seconds;\n 
this.timeStamp.minutes = timeStamp.minutes;\n this.timeStamp.hours = timeStamp.hours;\n this.timeStamp.day = timeStamp.day;\n this.timeStamp.month = timeStamp.month;\n this.timeStamp.year = timeStamp.year;\n\n this.systemTracks = new List<TrackData>();\n\n for(int i = 0; i < systemTracks.Count; i++)\n {\n TrackData trackData = new TrackData();\n trackData.trackID = systemTracks[i].trackID;\n trackData.trackState = systemTracks[i].trackState;\n\n trackData.creationTime = new TimeType();\n trackData.creationTime.c_seconds = timeStamp.c_seconds;\n trackData.creationTime.seconds = timeStamp.seconds;\n trackData.creationTime.minutes = timeStamp.minutes;\n trackData.creationTime.hours = timeStamp.hours;\n trackData.creationTime.day = timeStamp.day;\n trackData.creationTime.month = timeStamp.month;\n trackData.creationTime.year = timeStamp.year;\n\n trackData.relativeBearing = systemTracks[i].relativeBearing;\n trackData.relativeBearingRate = systemTracks[i].relativeBearingRate;\n this.systemTracks.Add(trackData);\n }\n }\n private OriginalBdtCasMessage GetGenerateObject()\n {\n this.timeStamp = new TimeType();\n this.timeStamp.c_seconds = 1;\n this.timeStamp.seconds = 1;\n this.timeStamp.minutes = 1;\n this.timeStamp.hours = 1;\n this.timeStamp.day = 1;\n this.timeStamp.month = 1;\n this.timeStamp.year = 1;\n\n this.systemTracks = new List<TrackData>();\n var trackData = new TrackData();\n trackData.trackID = 1;\n trackData.trackState = State.NewTrack;\n\n trackData.creationTime = new TimeType();\n trackData.creationTime.c_seconds = 1;\n trackData.creationTime.seconds = 1;\n trackData.creationTime.minutes = 1;\n trackData.creationTime.hours = 1;\n trackData.creationTime.day = 1;\n trackData.creationTime.month = 1;\n trackData.creationTime.year = 1;\n\n trackData.relativeBearing = 1;\n trackData.relativeBearingRate = 1;\n this.systemTracks.Add(trackData);\n return new OriginalBdtCasMessage(this.timeStamp, this.systemTracks);\n }\n }\n\n public struct TrackData\n {\n 
public long trackID;\n public State trackState;\n public TimeType creationTime;\n public float relativeBearing;\n public float relativeBearingRate;\n }\n public enum State\n {\n NewTrack,\n UpdateTrack,\n DeleteTrack\n }\n\n \n}\n" }, { "alpha_fraction": 0.515031635761261, "alphanum_fraction": 0.5245253443717957, "avg_line_length": 29.829267501831055, "blob_id": "dd812d15d1d7ebd9ff6fbc3b964425a33f9d0328", "content_id": "61edf5f21c4591f5e3140a308448e1cb9191be9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 88, "num_lines": 41, "path": "/lucidDBManager/LucidDream_OwnBoat_Microservice/src/RabbitMQSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\n\nnamespace LucidDreamSystem\n{\n class RabbitMQSender\n {\n private ConnectionFactory factory;\n private IConnection connection;\n private IModel channel;\n\n public RabbitMQSender()\n {\n factory = new ConnectionFactory()\n {\n HostName = \"localhost\",\n UserName = \"ferasg\",\n Password = \"123456\"\n //HostName = \"172.16.20.161\",\n //UserName = \"rutush\",\n //Password = \"123456\"\n };\n connection = factory.CreateConnection();\n channel = connection.CreateModel();\n channel.ExchangeDeclare(exchange: \"OwnBoatData\", type: ExchangeType.Fanout);\n }\n public void SendData(string data)\n {\n var body = Encoding.UTF8.GetBytes(data);\n channel.BasicPublish(exchange: \"OwnBoatData\",\n routingKey: \"\",\n basicProperties: null,\n body: body);\n Console.WriteLine(\" [x] Sent {0}\", data);\n }\n }\n}\n" }, { "alpha_fraction": 0.5326259732246399, "alphanum_fraction": 0.5506631135940552, "avg_line_length": 30.94915199279785, "blob_id": "047051e9696cb29273db66587491bb134f8d893a", "content_id": "931531ab9c5db29afb98654e7349b5380d43931a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 86, "num_lines": 59, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/Stitcher.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing BestTrackBeamSticher;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace BestTrackBeamStitcher\n{\n public static class Stitcher\n {\n public static TrackWithStitchedBeam stitch(TrackBeamData trackBeamData)\n {\n TrackWithStitchedBeam trackWithStitchedBeam = new TrackWithStitchedBeam();\n trackWithStitchedBeam.TrackNum = trackBeamData.TrackNum;\n\n byte[] beam1 = trackBeamData.Beam1;\n byte[] beam2 = trackBeamData.Beam2;\n double beam1Precentage = trackBeamData.Precentage;\n double beam2Precentage = 1- trackBeamData.Precentage;\n\n setVolume(beam1, beam1Precentage);\n setVolume(beam2, beam2Precentage);\n\n byte[] stitchedBeam = stitchBeams(beam1, beam2);\n trackWithStitchedBeam.StitchedBeam = stitchedBeam;\n\n return trackWithStitchedBeam;\n }\n\n static private void setVolume(byte[] buffer, double volume)\n {\n // scaling volume of buffer audio\n for (int i = 0; i < buffer.Length / 2; ++i)\n {\n // convert to 16-bit\n short sample = (short)((buffer[i * 2 + 1] << 8) | buffer[i * 2]);\n\n // scale\n double gain = volume; // value between 0 and 1.0\n sample = (short)(sample * gain + 0.5);\n\n // back to byte[]\n buffer[i * 2 + 1] = (byte)(sample >> 8);\n buffer[i * 2] = (byte)(sample & 0xff);\n }\n }\n\n static private byte[] stitchBeams(byte[] buffer, byte[] buffer2)\n {\n byte[] mixedBuffer = new byte[buffer.Length];\n\n for (int i = 0; i < buffer.Length; ++i)\n {\n mixedBuffer[i] = (byte)(buffer[i] + buffer2[i]);\n }\n\n return mixedBuffer;\n }\n }\n}\n" }, { "alpha_fraction": 0.5796737670898438, "alphanum_fraction": 0.5796737670898438, "avg_line_length": 21.13888931274414, "blob_id": 
"ce7c094243d50ed1447c77aaf93d0afe9b060b54", "content_id": "f090ef59fe9224f8224920dd50b114259beecfdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 799, "license_type": "no_license", "max_line_length": 51, "num_lines": 36, "path": "/lucidDBManager/lucidDBManager/Data/IPSOriginalMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace lucidDBManager.Data\n{\n class IPSOriginalMessage\n {\n public long count;\n public long last_pulse;\n public long freeze;\n public List<IPSTrack> tracks;\n public PingSteal ping_steal;\n\n\n public class IPSTrack\n {\n\n }\n\n public class PingSteal\n {\n public long track_identification;\n public float time_distance;\n public float reflection_depth;\n public float own_boat_depth;\n public float sound_velocity;\n public FloatRealValidType target_range;\n }\n public class FloatRealValidType\n {\n public bool valid;\n public float value;\n }\n }\n}\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7130952477455139, "avg_line_length": 30.11111068725586, "blob_id": "413843a030e4a1230ada8712e1184c5a27ec63b5", "content_id": "b517c1858bd04732c4cd6ea6ff2782d350f4e237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 103, "num_lines": 27, "path": "/AnalysisReceivingRabbitMQPython/RabbitMQPythonReceiving.py", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import pika\n\nprint('started...')\n\ndef callback(ch, method, properties, body):\n print(\" [x] %r\" % body)\n \n\t\nif __name__ == '__main__':\n\n credentials = pika.PlainCredentials('ferasg', '123456')\n parameters = pika.ConnectionParameters(credentials=credentials,host='192.168.43.215') \n \n connection = pika.BlockingConnection(parameters)\n channel = 
connection.channel()\n\n channel.exchange_declare(exchange='trackWithStitchedBeamData', exchange_type='fanout', durable=True)\n\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n\n channel.queue_bind(exchange='trackWithStitchedBeamData', queue=queue_name)\n\n print(' [*] Waiting for logs. To exit press CTRL+C')\n channel.basic_consume(queue = queue_name, on_message_callback = callback, auto_ack=True)\n\n channel.start_consuming()\n" }, { "alpha_fraction": 0.6480541229248047, "alphanum_fraction": 0.6480541229248047, "avg_line_length": 17.46875, "blob_id": "0d06057922d808857092a0db731f28fbeca55b1f", "content_id": "e3cec4891988484db0c3882cdc84225d2c0f1857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 593, "license_type": "no_license", "max_line_length": 44, "num_lines": 32, "path": "/lucidDBManager/lucidDBManager/Data/TrackData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing static lucidDBManager.Data.BasicData;\n\nnamespace lucidDBManager.Data\n{\n public struct TrackData\n {\n public long trackID;\n public State trackState;\n public TimeType creationTime;\n public float relativeBearing;\n public float relativeBearingRate;\n }\n\n public class SystemTracks\n {\n public TimeType timeStamp;\n public List<TrackData> systemTracks;\n }\n\n \n\n public enum State\n {\n NewTrack,\n UpdateTrack,\n DeleteTrack\n }\n\n}\n" }, { "alpha_fraction": 0.6116883158683777, "alphanum_fraction": 0.6116883158683777, "avg_line_length": 18.743589401245117, "blob_id": "43ac4650774c23144060077df03605c02449eb6a", "content_id": "51b6f659288dc84ebaaaa258a9b39b793203b8de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 772, "license_type": "no_license", "max_line_length": 44, "num_lines": 39, "path": 
"/TrackBeamParser/TrackBeamParser/SystemTracks.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace TrackBeamParser\n{\n public struct TrackData\n {\n public long trackID;\n public State trackState;\n public TimeType creationTime;\n public float relativeBearing;\n public float relativeBearingRate;\n }\n\n public class SystemTracks\n {\n public TimeType timeStamp;\n public List<TrackData> systemTracks;\n }\n\n public struct TimeType\n {\n public long hours;\n public long minutes;\n public long seconds;\n public long c_seconds;\n public long year;\n public long month;\n public long day;\n }\n\n public enum State\n {\n NewTrack,\n UpdateTrack,\n DeleteTrack\n }\n}\n" }, { "alpha_fraction": 0.5569155216217041, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 24.53125, "blob_id": "678d140abefd5334e7c4fc637e21f0f159fca267", "content_id": "9ef7e419c63dc00e234bff47162c63d8fed4154b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 819, "license_type": "no_license", "max_line_length": 83, "num_lines": 32, "path": "/Simulator/BeamBusCas/FileEdit.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.IO;\n\nnamespace BeamBusCas\n{\n public static class FileEdit\n {\n public static byte[][] GetRecording(string path)\n {\n int subSegmentsNum;\n byte[] casBeamBusRecording;\n byte[][] SubSegments;\n\n\n casBeamBusRecording = File.ReadAllBytes(path);\n subSegmentsNum = casBeamBusRecording.Length / 1400;\n SubSegments = new byte[subSegmentsNum][];\n\n for (int i = 0; i < subSegmentsNum; i++)\n {\n SubSegments[i] = new byte[1400];\n Array.Copy(casBeamBusRecording, i * 1400, SubSegments[i], 0, 1400);\n }\n\n return (SubSegments);\n }\n }\n}\n" }, { 
"alpha_fraction": 0.5113636255264282, "alphanum_fraction": 0.5113636255264282, "avg_line_length": 13.666666984558105, "blob_id": "468d5a61ef7153b9b5e3c7b2b12809bf843fbbf0", "content_id": "edb10141df75a20d1f19dcfbdb6601ae1f8bf627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 178, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/lucidDBManager/lucidDBManager/Program.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\n\nnamespace lucidDBManager\n{\n class Program\n {\n static void Main(string[] args)\n {\n Manager mgr = new Manager();\n }\n }\n}\n" }, { "alpha_fraction": 0.5122858285903931, "alphanum_fraction": 0.5142256617546082, "avg_line_length": 30.085426330566406, "blob_id": "d783e0407004045911893fd04f263fba2142c358", "content_id": "0ea0a7bf29dba3adf27aefe4548cc3d2161c59c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 6188, "license_type": "no_license", "max_line_length": 101, "num_lines": 199, "path": "/lucidDBManager/lucidDBManager/DataHandler.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing lucidDBManager.Data;\nusing lucidDBManager.mongoDB;\nusing lucidDBManager.RabbitMQ;\nusing Newtonsoft.Json;\nusing static lucidDBManager.Data.BasicData;\nusing static lucidDBManager.Data.BasicOriginalData;\n\nnamespace lucidDBManager\n{\n public class DataHandler\n {\n RabbitMQSender sender;\n\n MongoDBServer db;\n\n Manager manager;\n\n // system track helper types\n bool[] isKnownTarget;\n TimeStampType[] creationTime;\n TMAOriginalMessage lastTracksMessage;\n\n\n public DataHandler(RabbitMQSender sender, MongoDBServer db, Manager mgr)\n {\n this.sender = sender;\n this.db = db;\n this.manager = mgr;\n isKnownTarget = new bool[26];\n creationTime = new TimeStampType[26];\n\n for 
(int i = 0; i < isKnownTarget.Length; i++)\n {\n isKnownTarget[i] = false;\n }\n }\n\n public void ReceiveActionMessage(string message)\n {\n switch (message)\n {\n case \"Record\":\n manager.StartReceivingUAG();\n break;\n\n case \"Stop\":\n manager.StopReceivingUAG();\n break;\n\n case \"Play\":\n break;\n default:\n break;\n }\n }\n\n // Recieves a string in a Json format.\n // Handle the Received TMA message\n public void ReceiveTMAData(TMAOriginalMessage receivedMessage)\n {\n HandleTMAMessage(receivedMessage);\n }\n\n // Handle a TMA message\n public void HandleTMAMessage(TMAOriginalMessage message)\n {\n SystemTracks sysTracks = new SystemTracks();\n\n sysTracks.timeStamp = convertTime(message.timeStamp);\n\n sysTracks.systemTracks = new List<TrackData>();\n\n foreach (OriginalSystemTrack OrigTrack in message.systemTracks)\n {\n // if track exists\n if (OrigTrack.trackId != 0)\n {\n TrackData newTrackData = new TrackData();\n\n newTrackData.trackID = OrigTrack.trackId;\n\n newTrackData.relativeBearing = OrigTrack.bearing;\n\n if (OrigTrack.bearingRate.valid)\n {\n newTrackData.relativeBearingRate = OrigTrack.bearingRate.value;\n }\n\n // if new track\n if (!isKnownTarget[OrigTrack.trackId - 1])\n {\n isKnownTarget[OrigTrack.trackId - 1] = true;\n creationTime[OrigTrack.trackId - 1] = OrigTrack.timeStamp;\n\n newTrackData.trackState = State.NewTrack;\n newTrackData.creationTime = convertTime(OrigTrack.timeStamp);\n }\n // if old track\n else\n {\n newTrackData.trackState = State.UpdateTrack;\n newTrackData.creationTime = convertTime(creationTime[OrigTrack.trackId - 1]);\n }\n\n sysTracks.systemTracks.Add(newTrackData);\n }\n }\n\n if (lastTracksMessage != null)\n {\n foreach (var currTrack in lastTracksMessage.systemTracks)\n {\n if (currTrack.trackId != 0)\n {\n // check if track was deleted\n if (!sysTracks.systemTracks.Exists(x => x.trackID == currTrack.trackId))\n {\n isKnownTarget[currTrack.trackId - 1] = false;\n TrackData newTrack = new TrackData()\n 
{\n trackID = currTrack.trackId,\n trackState = State.DeleteTrack\n };\n\n sysTracks.systemTracks.Add(newTrack);\n }\n }\n }\n }\n\n lastTracksMessage = message;\n\n // send to stiching\n sender.SendTrackData(sysTracks);\n\n // save to db\n db.saveRecord(sysTracks, \"SystemTrack\");\n }\n\n // Recieves a string in a Json format.\n // Handle the Received OwnBoat message\n public void ReceiveOwnBoatData(OwnBoatOriginalMessage receivedMessage)\n {\n HandleOwnBoatMessage(receivedMessage);\n }\n\n // Handle the Own Boat message\n public void HandleOwnBoatMessage(OwnBoatOriginalMessage message)\n {\n OwnBoatData ownBoat = new OwnBoatData();\n\n // convert\n ownBoat.timeStamp = convertTime(message.systemTime.time.value);\n ownBoat.timeZone = message.timeZone.data.value;\n ownBoat.heading = message.heading.data.value;\n ownBoat.pitch = message.pitch.data.value;\n ownBoat.roll = message.roll.data.value;\n ownBoat.heave = message.heave.data.value;\n\n\n sender.SendOwnBoatData(ownBoat);\n\n //save to db\n db.saveRecord(ownBoat, \"OwnBoat\");\n }\n\n public TimeType convertTime(TimeStampType origType)\n {\n TimeType newType;\n newType.c_seconds = origType.time.c_seconds;\n newType.seconds = origType.time.seconds;\n newType.minutes = origType.time.minutes;\n newType.hours = origType.time.hours;\n newType.day = origType.date.day;\n newType.month = origType.date.month;\n newType.year = origType.date.year;\n\n return newType;\n }\n\n public void SendOfflineTrackData()\n {\n // get track data by id from db\n }\n\n public void SendOfflineAudioFile()\n {\n // get wav file by id\n }\n\n public void SendOfflineOwnBoatData()\n {\n // get own boat data by id from db\n }\n }\n}\n" }, { "alpha_fraction": 0.6819277405738831, "alphanum_fraction": 0.6819277405738831, "avg_line_length": 22.05555534362793, "blob_id": "014369c5c2e0f8d797fad860d80e035bd14df9a1", "content_id": "7c5a2ff9d809fbc85253025b798f8cdc95228a0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C#", "length_bytes": 417, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/lucidDBManager/lucidDBManager/Data/OwnBoatData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing static lucidDBManager.Data.BasicData;\n\nnamespace lucidDBManager.Data\n{\n public class OwnBoatData\n {\n public TimeType timeStamp;\n public double timeZone;\n public double heading;\n public double roll;\n public double pitch;\n public double heave;\n public double course_overe_ground;\n }\n}\n" }, { "alpha_fraction": 0.6540296673774719, "alphanum_fraction": 0.6540296673774719, "avg_line_length": 30.697479248046875, "blob_id": "b91cf22249806316021301df2be180ff6e58efed", "content_id": "2e445095484c210b1869b3e8fa542d8c097195a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3774, "license_type": "no_license", "max_line_length": 77, "num_lines": 119, "path": "/lucidDBManager/lucidDBManager/Data/OwnBoatOriginalMessage.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing static lucidDBManager.Data.BasicOriginalData;\n\nnamespace lucidDBManager.Data\n{\n public class OwnBoatOriginalMessage\n {\n public IdlHeader idlHeader = new IdlHeader();\n public SystemTime systemTime = new SystemTime();\n public Timezone timeZone = new Timezone();\n public Heading heading = new Heading();\n public HeadingRate heading_rate = new HeadingRate();\n public Roll roll = new Roll();\n public RollRate roll_rate = new RollRate();\n public Pitch pitch = new Pitch();\n public PitchRate pitch_rate = new PitchRate();\n public Heave heave = new Heave();\n public HeaveRate heave_rate = new HeaveRate();\n public CourseOverGround course_overe_ground = new CourseOverGround();\n }\n\n public class IdlHeader\n {\n public long 
message_state;\n public long message_source;\n public TimeStampType compile_time_of_message = new TimeStampType();\n public long number_of_bytes;\n }\n\n public class SystemTime\n {\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class TimeStampValidType\n {\n public bool valid;\n public TimeStampType value = new TimeStampType();\n }\n public class Timezone\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n\n }\n public class LongRealValidType\n {\n public bool valid;\n public double value;\n }\n public class Heading\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class HeadingRate\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class Roll\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class RollRate\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class Pitch\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class PitchRate\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class Heave\n {\n public LongRealValidType data = new LongRealValidType();\n public bool 
is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class HeaveRate\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n public class CourseOverGround\n {\n public LongRealValidType data = new LongRealValidType();\n public bool is_current;\n public long sensor;\n public TimeStampValidType time = new TimeStampValidType();\n }\n}\n" }, { "alpha_fraction": 0.5189393758773804, "alphanum_fraction": 0.5194805264472961, "avg_line_length": 35.959999084472656, "blob_id": "fb8bbaf08a5d7f2284d4c022363bf04adab8b7fc", "content_id": "72446cdfdab860660924bfd01fca79d1d89c4e45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1850, "license_type": "no_license", "max_line_length": 120, "num_lines": 50, "path": "/LiveAudioPlayer/LiveAudioPlayer/PlayerCommandsReciever.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing RabbitMQ.Client;\nusing RabbitMQ.Client.Events;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace LiveAudioPlayer\n{\n public static class PlayerCommandsReciever\n {\n static IModel playerCommandsChannel;\n\n public static void StartListening(Action<string> funcThatWantTheData)\n {\n IConnection connection = RabbitMQConnection.getConnection();\n playerCommandsChannel = connection.CreateModel();\n\n playerCommandsChannel.ExchangeDeclare(exchange: \"liveAudioPlayerCommands\",\n type: ExchangeType.Fanout,\n durable: true,\n autoDelete: false,\n arguments: null);\n\n playerCommandsChannel.QueueDeclare(queue: \"commandQueue\",\n durable: true,\n exclusive: false,\n autoDelete: false,\n arguments: null);\n\n playerCommandsChannel.QueueBind(queue: \"commandQueue\", exchange: \"liveAudioPlayerCommands\", routingKey: \"\");\n\n var consumer = new 
EventingBasicConsumer(playerCommandsChannel);\n consumer.Received += (model, ea) =>\n {\n byte[] body = ea.Body;\n string command = Encoding.UTF8.GetString(body);\n funcThatWantTheData(command);\n };\n\n playerCommandsChannel.BasicConsume(queue: \"commandQueue\",\n autoAck: true,\n consumer: consumer);\n }\n\n public static void stopListening()\n {\n playerCommandsChannel.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.6122661232948303, "alphanum_fraction": 0.6138253808021545, "avg_line_length": 29.0625, "blob_id": "6a136c55699a2a1b53c426f0c23a19a5ea76dd7d", "content_id": "742b7aea8aa1a558e1404746195e43da9c4d64c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1924, "license_type": "no_license", "max_line_length": 94, "num_lines": 64, "path": "/UI/AngularDreamUI/src/app/track-list/track-list.component.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport { Track } from '../track';\nimport { WebSocketService } from '../websocket.service';\nimport { MessageService } from '../message.service';\n\n@Component({\n selector: 'app-track-list',\n templateUrl: './track-list.component.html',\n styleUrls: ['./track-list.component.css']\n})\nexport class TrackListComponent implements OnInit {\n\n tracks: Track[] = [\n\n ];\n\n selectedTrack: Track;\n flag = false;\n\n onSelect(track: Track): void {\n console.log(track.id);\n // Send the clicked track id to the service in order to present the chosen track data\n this.service.sendMessage(track.id.toString());\n \n // Emit the track id to the server in order to change the track audio to the chosen one\n this.webSocketService.emit('server_chosenTrackId', track.id.toString());\n }\n\n constructor(private webSocketService: WebSocketService, private service: MessageService) { }\n\n ngOnInit() {\n this.webSocketService.listen('client_trackData').subscribe((data) => {\n // 
console.log(data);\n var trackListObj = JSON.parse(data as string);\n console.log(data);\n console.log(trackListObj);\n console.log(trackListObj.systemTracks[0].trackID);\n\n\n\n // Set the track list that has given from server \n \n if (this.tracks.length == 0) {\n trackListObj.systemTracks.forEach(track => {\n this.tracks.push({ id: track.trackID, name: track.trackID });\n });\n }\n else {\n //this.course = ownboatObj.course;\n trackListObj.systemTracks.forEach(track => {\n for (var i = 0; i < this.tracks.length; i++) {\n if (this.tracks[i].id == (track.trackID)) {\n this.flag = true;\n }\n }\n if (!this.flag) {\n this.tracks.push({ id: track.trackID, name: track.trackID });\n this.flag = false;\n }\n });\n }\n });\n }\n}\n" }, { "alpha_fraction": 0.4908998906612396, "alphanum_fraction": 0.5010111331939697, "avg_line_length": 25.7297306060791, "blob_id": "b56c7ca18eb57c5ae47340a3be6f6d681a57489d", "content_id": "7e5da7ce5c48f8afaba183f4b20105252f51ecd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1980, "license_type": "no_license", "max_line_length": 62, "num_lines": 74, "path": "/Simulator/WaterfallSimulator/Manager.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.Windows.Threading;\nusing WaterfallSimulator.Properties;\n\nnamespace WaterfallSimulator\n{\n public class Manager\n {\n private DispatcherTimer timer;\n private LineGenerator generator;\n private LSRHeaderGenerator lsrHeaderGen;\n private ComWriter writer;\n\n public Manager()\n {\n writer = new ComWriter();\n generator = new LineGenerator();\n lsrHeaderGen = new LSRHeaderGenerator();\n\n timer = new DispatcherTimer();\n timer.Interval = new TimeSpan(0, 0, 0, 0, 500);\n timer.Tick += Timer_Tick;\n }\n\n public void Start()\n {\n timer.Start();\n }\n\n private void Timer_Tick(object 
sender, EventArgs e)\n {\n //writer.SendCommand('F');\n byte[] dataToSend = new byte[4121];\n dataToSend[0] = Convert.ToByte('P');\n byte[] temp = lsrHeaderGen.CreateHeader();\n for (int i = 1; i < 25; i++)\n {\n dataToSend[i] = temp[i - 1];\n\n }\n\n if (Settings.Default.SendRecordedData)\n {\n temp = generator.ReadLineFromWF();\n }\n else\n {\n temp = generator.GenerateRandomLine();\n \n }\n for (int i = 25; i < dataToSend.Length; i++)\n {\n dataToSend[i] = temp[i - 25];\n }\n writer.Send(dataToSend);\n //writer.SendCommand('P');\n //writer.Send(lsrHeaderGen.CreateHeader());\n\n //if (Settings.Default.SendRecordedData)\n //{\n // writer.Send(generator.ReadLineFromWF());\n //}\n //else\n //{\n // writer.Send(generator.GenerateRandomLine());\n //}\n\n }\n }\n}\n" }, { "alpha_fraction": 0.6529850959777832, "alphanum_fraction": 0.6604477763175964, "avg_line_length": 18.14285659790039, "blob_id": "225d217e2dc68a8995c8256d6faad921d1efcfad", "content_id": "1ff34d84e36655b480548006e3af7518496ad1ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 270, "license_type": "no_license", "max_line_length": 33, "num_lines": 14, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/TrackBeamData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace BestTrackBeamSticher\n{\n public class TrackBeamData\n {\n public int TrackNum;\n public byte[] Beam1;\n public byte[] Beam2;\n public double Precentage;\n }\n}\n" }, { "alpha_fraction": 0.5638766288757324, "alphanum_fraction": 0.5651353001594543, "avg_line_length": 30.156862258911133, "blob_id": "a4b55264f58be8cd6f4776aa8c01575a92d7e3cf", "content_id": "3ecce5c532a58f27bb2e77c307c008c97ea799bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 93, "num_lines": 51, "path": 
"/lucidDBManager/lucidDBManager/RabbitMQ/RabbitMQSender.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing lucidDBManager.Data;\nusing RabbitMQ.Client;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;\nusing Newtonsoft.Json;\n\nnamespace lucidDBManager.RabbitMQ\n{\n public class RabbitMQSender\n {\n ConnectionFactory Factory { get; set; }\n\n IConnection Connection { get; set; }\n\n IModel Channel { get; set; }\n\n public RabbitMQSender()\n {\n Factory = new ConnectionFactory() { HostName = \"localhost\" };\n Connection = Factory.CreateConnection();\n Channel = Connection.CreateModel();\n Channel.ExchangeDeclare(exchange: \"LucidTrackData\", type: ExchangeType.Fanout);\n Channel.ExchangeDeclare(exchange: \"LucidOwnBoatData\", type: ExchangeType.Fanout);\n }\n\n public void SendTrackData(SystemTracks message)\n {\n string json = JsonConvert.SerializeObject(message);\n\n var body = Encoding.UTF8.GetBytes(json);\n\n Channel.BasicPublish(exchange: \"LucidTrackData\",\n routingKey: \"\",\n basicProperties: null,\n body: body);\n }\n\n public void SendOwnBoatData(OwnBoatData message)\n {\n string json = JsonConvert.SerializeObject(message);\n\n var body = Encoding.UTF8.GetBytes(json);\n\n Channel.BasicPublish(exchange: \"LucidOwnBoatData\",\n routingKey: \"\",\n basicProperties: null,\n body: body);\n }\n }\n}\n" }, { "alpha_fraction": 0.5274725556373596, "alphanum_fraction": 0.5398351550102234, "avg_line_length": 20.41176414489746, "blob_id": "550fbd03f51397da00eaf41aacea76daf69dd1a4", "content_id": "8e4b2a499cea49c96b9c67f91d539ae147f10ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 730, "license_type": "no_license", "max_line_length": 52, "num_lines": 34, "path": "/BestTrackBeamSticher/BestTrackBeamSticher/RabbitMQConnection.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing RabbitMQ.Client;\nusing System;\nusing 
System.Collections.Generic;\nusing System.Text;\n\nnamespace BestTrackBeamSticher\n{\n public static class RabbitMQConnection\n {\n static IConnection connection;\n\n static RabbitMQConnection()\n {\n var factory = new ConnectionFactory()\n {\n HostName = \"172.16.20.53\",\n UserName = \"ferasg\",\n Password = \"123456\",\n };\n\n connection = factory.CreateConnection();\n }\n\n internal static IConnection getConnection()\n {\n return connection;\n }\n\n public static void dispose()\n {\n connection.Dispose();\n }\n }\n}\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.529411792755127, "avg_line_length": 18.83333396911621, "blob_id": "fd450949e07d57861134f5674deae9b237b1acfb", "content_id": "c4f419c397da98b5cc30ae8158e71d65a96b540b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 597, "license_type": "no_license", "max_line_length": 34, "num_lines": 30, "path": "/lucidDBManager/lucidDBManager/Data/BasicOriginalData.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace lucidDBManager.Data\n{\n public class BasicOriginalData\n {\n public class TimeStampType\n {\n public HmssType time;\n public YmdType date;\n }\n\n public class HmssType\n {\n public long hours;\n public long minutes;\n public long seconds;\n public long c_seconds;\n }\n\n public class YmdType\n {\n public long year;\n public long month;\n public long day;\n }\n }\n}\n" }, { "alpha_fraction": 0.6847162842750549, "alphanum_fraction": 0.6893455982208252, "avg_line_length": 49.55673599243164, "blob_id": "af24c7555f910d8071c96f8fa43caca6ed1d244c", "content_id": "ce4018c867d4bf4d4da5c31fdf591364ace2e31f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 14257, "license_type": "no_license", "max_line_length": 343, "num_lines": 282, "path": 
"/lucidDBManager/LucidDream_TMA_Microservice/src/LucidDream_TMA_Managed.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n// Generated at : 31/10/2019 15:16:17 ,in TIK46593 PC ,by liran harari\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\nusing System;\nusing LucidDreamContractManager_Managed;\nusing DDS_ManagedShell;\n\nnamespace LucidDreamSystem\n{\n class LucidDreamSystemClient\n {\n \n #region Data members\n\t\t// contarct file path\n static string contractPath = \"../config/LucidDream_ContractManagerSystem_Contract.xml\";\n\n //contract Manager\n private LucidDreamContractManager m_LucidDreamContractManager;\n\n\t\t#region DataWriters\n\t\tprivate LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataWriter m_TmaDataWriter;\n\t\t#endregion\n \n\t\t#region DataReaders\n\t\tprivate LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataReader m_TmaDataReader;\n\t\t#endregion\n\t\t#endregion\n\n\t\t#region Properties\n\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataWriter TmaDataWriter\n {\n get { return m_TmaDataWriter; }\n set { m_TmaDataWriter = value; }\n }\n\n\t\t\n\n public LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataReader TmaDataReader\n {\n get { return m_TmaDataReader; }\n set { m_TmaDataReader = value; }\n }\n\n\n #endregion\n \n\t\tpublic void SetupDataWriters()\n\t\t{\n\t\t\tTmaDataWriter = m_LucidDreamContractManager.Get_LucidDreamParticipant_TmaDataWriter();\n\t\t}\n\t\t\n\t\tpublic void 
SetupDataReaders()\n\t\t{\n\t\t\tTmaDataReader = m_LucidDreamContractManager.Get_LucidDreamParticipant_TmaDataReader();\n\t\t}\n\t\t\n\t\tpublic void RegistrationToEvents()\n\t\t{\n\t\t\t\n\t\t\t\n\t\t\t#region Register TmaDataWriter events\n TmaDataWriter.OnLivelinessLost += new WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n TmaDataWriter.OnOfferedDeadlineMissed += new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n TmaDataWriter.OnOfferedIncompatibleQOS += new OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n TmaDataWriter.OnPublicationMatched += new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n\t\t\t#endregion\n \n\t\t\t#region Register TmaDataReader events\n\t\t\tTmaDataReader.OnSampleArrived += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeSampleArrivedHandler(TmaDataReaderOnSampleArrived);\n\t\t\tTmaDataReader.OnInstanceNotAliveNoWriters += new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeInstanceNotAliveNoWritersHandler(TmaDataReaderOnInstanceNotAliveNoWriters);\n TmaDataReader.OnLivelinessGained += new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n TmaDataReader.OnLivelinessLost += new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n TmaDataReader.OnRequestedDeadlineMissed += new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n TmaDataReader.OnRequestedIncompatibleQos += new DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n TmaDataReader.OnSampleLost += new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n TmaDataReader.OnSampleRejected += new DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n TmaDataReader.OnSubscriptionMatched += new 
DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n\t\t\t#endregion\n \n \n\t\t}\n\t\t\n public void Init()\n {\n m_LucidDreamContractManager = new LucidDreamContractManager();\n m_LucidDreamContractManager.LoadFromFile(contractPath);\n m_LucidDreamContractManager.VivifyAll();\n SetupDataWriters();\n SetupDataReaders();\n RegistrationToEvents();\n }\n \n public void EnableAll()\n {\n\t\t\tm_LucidDreamContractManager.EnableAll();\n }\n \n public void Publish()\n {\n LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_type idl_idde_itfmod_to_3pa_system_target_data_idde_itfmod_to_3pa_system_target_data_type = new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_type(); \n \n\t\t\t\n\t\t\tfor (int i = 0; i < 500; i++)\n\t\t\t{\n\t\t\t\tSystem.Threading.Thread.Sleep(1000);\n\t\t\t\t\n\t\t\t\t//TmaDataWriter.Write(idl_idde_itfmod_to_3pa_system_target_data_idde_itfmod_to_3pa_system_target_data_type);\n\t\t\t\t\n\t\t\t\t//Console.WriteLine(\"\\n****** Samples set #\" + i + \" was sent ******\\n\");\n\t\t\t\t\n\t\t\t\t// To dispose some dataType use this code\n\t\t\t\t//mySystemClient.SomeDataWriter.Dispose(dataType);\n }\n }\n\n public void Shutdown()\n {\n\t\t\t\n\t\t\t\n\t\t\t#region Unregister TmaDataWriter events\n\t\t\tTmaDataWriter.OnLivelinessLost -= new WriterLivelinessLostDelegate(OnLivelinessLostHendler);\n TmaDataWriter.OnOfferedDeadlineMissed -= new OfferedDeadlineMissedDelegate(OnOfferedDeadlineMissedHendler);\n TmaDataWriter.OnOfferedIncompatibleQOS -= new OfferedIncompatibleQOSDelegate(OnOfferedIncompatibleQOSHendler);\n TmaDataWriter.OnPublicationMatched -= new PublicationMatchedDelegate(OnPublicationMatchedHendler);\n\t\t\t#endregion\n\t\t\t\t\t\t \n\t\t\t#region Unregister TmaDataReader events\n\t\t\tTmaDataReader.OnSampleArrived -= new 
LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeSampleArrivedHandler(TmaDataReaderOnSampleArrived);\n\t\t\tTmaDataReader.OnInstanceNotAliveNoWriters -= new LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeInstanceNotAliveNoWritersHandler(TmaDataReaderOnInstanceNotAliveNoWriters);\n TmaDataReader.OnLivelinessGained -= new DDS_ManagedShell.LivelinessGainedDelegate(OnLivelinessGainedEventHendler);\n TmaDataReader.OnLivelinessLost -= new ReaderLivelinessLostDelegate(OnLivelinessLostEventHendler);\n TmaDataReader.OnRequestedDeadlineMissed -= new RequestedDeadlineMissedDelegate(OnRequestedDeadlineMissedEventHendler);\n TmaDataReader.OnRequestedIncompatibleQos -= new DDS_ManagedShell.RequestedIncompatibleQosDelegate(OnRequestedIncompatibleQosEventHendler);\n TmaDataReader.OnSampleLost -= new DDS_ManagedShell.SampleLostDelegate(OnSampleLostEventHendler);\n TmaDataReader.OnSampleRejected -= new DDS_ManagedShell.SampleRejectedDelegate(OnSampleRejectedEventHendler);\n TmaDataReader.OnSubscriptionMatched -= new DDS_ManagedShell.SubscriptionMatchedDelegate(OnSubscriptionMatchedEventHendler);\n\t\t\t#endregion\n \n\t\t\tm_LucidDreamContractManager.SubdueAll();\n m_LucidDreamContractManager.UnLoad();\n }\n \n #region Events\n #region General DataWriters Event\n /*! This event is raised when the DataWriter failed to write data\n within the time period set in its DeadlineQosPolicy */\n public void OnOfferedDeadlineMissedHendler(DataWriter dataWriter , OfferedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" missed its offered deadline \" + status.TotalCountChanged + \" times.\");\n }\n\n /*! 
This event is raised when the DataWriter failed to signal its liveliness\n within the time specified by the LivelinessQosPolicy */\n public void OnLivelinessLostHendler(DataWriter dataWriter, LivelinessLostStatus status)\n {\n Console.WriteLine(\"Liveliness Lost on DataWriter: \" + dataWriter.Name);\n }\n\n /*! This event is raised when the DataWriter discovered a DataReader for\n the same topic, but the DataReader had requested Qos settings incompatible \n with this DataWriter's offered Qos */\n public void OnOfferedIncompatibleQOSHendler(DataWriter dataWriter,OfferedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"In DataWriter \" + dataWriter.Name +\n \" QoS policies, that were incompatible with remote DataReader. Check contracts on both sides.\");\n }\n \n\t\t/*! This event is raised when the DataWriter discovered a matching DataReader */\n public void OnPublicationMatchedHendler(DataWriter dataWriter, PublicationMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" Publication matched\");\n }\n else\n {\n Console.WriteLine(\"in DataWriter: \" + dataWriter.Name + \" \" + status.CurrentCountChange + \" DataReader lost\");\n }\n }\n #endregion\n\n #region General DataReaders Event\n /*! This event is raised when the DataReader did not receive\n\t\ta new sample for an data-instance within the time period \n\t\tset in the DataReader's DeadlineQosPolicy */\n public void OnRequestedDeadlineMissedEventHendler(DataReader dataReader, RequestedDeadlineMissedStatus status)\n {\n\t\t\tConsole.WriteLine(\"requested deadline missed on DataReader \" + dataReader.Name +\n \"requested deadline that was not respected by DataWriter \" +\n status.TotalCountChanged);\n }\n\n\t\t/*! 
This event is raised when the number of matched DataWriters that are \n currently alive changed from any number to 0 */\n public void OnLivelinessLostEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"Liveliness lost of \" + dataReader.Name);\n }\n \n\t\t/*! This event is raised when the number of matched DataWriters that are \n currently alive increased from 0 to 1 */\n public void OnLivelinessGainedEventHendler(DataReader dataReader, LivelinessChangedStatus status)\n {\n\t\t\tConsole.WriteLine(\"a new Liveliness gained of \" + dataReader.Name);\n }\n\n\t\t/*! This event is raised when the DataReader discovered a dataWriter for \n the same Topic, but that DataReader has requested Qos settings incompatible\n with this DataWriter's offered Qos */\n public void OnRequestedIncompatibleQosEventHendler(DataReader dataReader, RequestedIncompatibleQOSStatus status)\n {\n\t\t\tConsole.WriteLine(\"Incompatible Qos on topic \" + dataReader.Name +\n\t\t\t \" QoS policies, that were inconsistent with DataWriter. Check contracts on both sides.\");\n }\n\n /*! This event is raised when one or more samples received from the DataWriter\n have been dropped by the DataReader */\n public void OnSampleRejectedEventHendler(DataReader dataReader, SampleRejectedStatus status)\n {\n\t\t\tConsole.WriteLine(\" sample rejected in DataReader: \" + dataReader.Name + \"the reason is: \" +\n \"samples were rejected. Usually this happens when DataReader's memory resources are exhausted.\");\n }\n \n\t\t/*! This event is raised when one or more samples received from the DataWriter\n have failed to be received */\n public void OnSampleLostEventHendler(DataReader dataReader, SampleLostStatus status)\n {\n\t\t\tConsole.WriteLine(\"\\n\" + status.TotalCountChanged + \" Sample lost on DataReader: \" + dataReader.Name +\n\t\t\t \"\\n until now \" + status.TotalCount + \" samples lost\" + \n\t\t\t \"\\n samples were lost. 
Usually this happens when DataWriter writes faster than DataReader reads.\");\n\n }\n \n\t\t/*! This event is raised when the DataReader discovered a matching DataWriter */\n public void OnSubscriptionMatchedEventHendler(DataReader dataReader, SubscriptionMatchedStatus status)\n {\n\t\t\tif (status.CurrentCountChange > 0)\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" subscription matched\");\n }\n else\n {\n Console.WriteLine(\"in dataReader: \" + dataReader.Name + \" \" + status.CurrentCountChange + \" an existing matched DataWriter has been deleted\");\n }\n }\n #endregion\n\n #region Specific DataReaders Events\n\t\t#region TmaDataReader Events\n public void TmaDataReaderOnSampleArrived(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_type dataType, SampleInfo info, ValidityStatus validity)\n {\n\t\t\tConsole.WriteLine(\"a new sample of \\\"idde_itfmod_to_3pa_system_target_data_type\\\" has arrived\");\n }\n \n \n public void TmaDataReaderOnInstanceNotAliveNoWriters(LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_typeDataReader dr, LucidDream_DataTypesManaged.idl_idde_itfmod_to_3pa_system_target_data.idde_itfmod_to_3pa_system_target_data_type dataType, SampleInfo info)\n {\n\t\t\tConsole.WriteLine(\"an instance of \\\"idde_itfmod_to_3pa_system_target_data_type\\\" has lost all its writers\");\n }\n \n #endregion\n #endregion\n #endregion\n }\n\n class SubscriberProgram\n {\n static void Main(string[] args)\n {\n LucidDreamSystemClient mySystemClient = new LucidDreamSystemClient();\n mySystemClient.Init();\n\t\t\tmySystemClient.EnableAll();\n\t\t\tmySystemClient.Publish();\n \n Console.WriteLine(\"Shutting Down...\");\n mySystemClient.Shutdown();\n }\n }\n}\n" }, { 
"alpha_fraction": 0.5047479867935181, "alphanum_fraction": 0.5135135054588318, "avg_line_length": 22.60344886779785, "blob_id": "050c1db0aabbc6a9230057e8a3b58b983b0d2425", "content_id": "ae38971c83aa74e03a01a6d6ef66695595f82a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1371, "license_type": "no_license", "max_line_length": 51, "num_lines": 58, "path": "/Simulator/WaterfallSimulator/ComWriter.cs", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "๏ปฟusing System;\nusing System.Collections.Generic;\nusing System.IO.Ports;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.Windows.Threading;\n\nnamespace WaterfallSimulator\n{\n class ComWriter : IWriter\n {\n private SerialPort serialPort;\n\n public ComWriter()\n {\n serialPort = new SerialPort();\n Configure();\n }\n\n public void Configure()\n {\n #region Serial Port config\n serialPort.PortName = \"COM2\";\n serialPort.DataBits = 8;\n serialPort.StopBits = StopBits.One;\n serialPort.BaudRate = 38400;\n serialPort.Parity = Parity.None;\n serialPort.WriteBufferSize = 4096;\n serialPort.Encoding = Encoding.ASCII;\n #endregion\n if (!serialPort.IsOpen)\n {\n serialPort.Open();\n }\n\n }\n\n public void SendCommand(char command)\n {\n //Task.Run(() =>\n //{\n serialPort.Write(command.ToString());\n serialPort.DiscardOutBuffer();\n //});\n }\n\n public void Send(byte[] data)\n {\n //Task.Run(() =>\n //{\n serialPort.Write(data, 0, data.Length);\n serialPort.DiscardOutBuffer();\n //});\n }\n\n }\n}\n" }, { "alpha_fraction": 0.7305101156234741, "alphanum_fraction": 0.7314725518226624, "avg_line_length": 31.46875, "blob_id": "e0e019ca12fbc8f592d7a623f5290144fc4c4418", "content_id": "7e75b3449fd00688b2ca730ebad326f09fb187e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 91, "num_lines": 32, 
"path": "/UI/AngularDreamUI/src/app/app.module.ts", "repo_name": "elixdlol/lucid-dream", "src_encoding": "UTF-8", "text": "import { BrowserModule } from '@angular/platform-browser';\nimport { NgModule } from '@angular/core';\n\nimport { AppComponent } from './app.component';\nimport { TrackComponent } from './track/track.component';\nimport { TrackListComponent } from './track-list/track-list.component';\nimport { OwnboatComponent } from './ownboat/ownboat.component';\nimport { ChartComponent } from './chart/chart.component';\nimport { TrackDataComponent } from './track-data/track-data.component';\nimport { CustomTrackDataComponent } from './custom-track-data/custom-track-data.component';\nimport { AudioManagerComponent } from './audio-manager/audio-manager.component';\nimport { ChartsModule } from 'ng2-charts';\n\n@NgModule({\n declarations: [\n AppComponent,\n TrackComponent,\n TrackListComponent,\n OwnboatComponent,\n ChartComponent,\n TrackDataComponent,\n CustomTrackDataComponent,\n AudioManagerComponent\n ],\n imports: [\n BrowserModule,\n ChartsModule\n ],\n providers: [],\n bootstrap: [AppComponent]\n})\nexport class AppModule { }\n" } ]
63
frascuelillo/frascu-stream
https://github.com/frascuelillo/frascu-stream
b0494b950b2b4bf8c62a2c34e7b03041cdef66ed
70608ed004ce767cce89e9b99b3c9576ef241a38
7c59beabef330cf0ae6a430be555489753510c8f
refs/heads/master
2021-11-04T03:22:19.165418
2021-10-28T13:47:32
2021-10-28T13:47:32
200,808,216
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7903682589530945, "alphanum_fraction": 0.7960339784622192, "avg_line_length": 34.29999923706055, "blob_id": "2f7d0691a73d57d7c043e0f7bddceedff12a3c38", "content_id": "cf1109656e4c77cd3d7178899bc36c206a239340", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 354, "license_type": "permissive", "max_line_length": 114, "num_lines": 10, "path": "/README.md", "repo_name": "frascuelillo/frascu-stream", "src_encoding": "UTF-8", "text": "# frascu-stream\nThreaded Python OpenCV camera stream with FPS measurement\nThis is my first GitHub contribution.\nItยดs a Python Module that give threaded camera stream with the capability to get reading FPS using self.fps_stream\nIf you run the script it have a little test example. \nYou could use it as a imported module.\n\nCreated with Python 3.6\n\nFrancisco Melรฉndez\n" }, { "alpha_fraction": 0.6484817266464233, "alphanum_fraction": 0.665465772151947, "avg_line_length": 21.858823776245117, "blob_id": "98f676c8c445c28fba221b2a9913d4c484b156a6", "content_id": "d82c3f0429fb0785c05a0cb050cde89742a8b6ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "permissive", "max_line_length": 82, "num_lines": 85, "path": "/camerastream.py", "repo_name": "frascuelillo/frascu-stream", "src_encoding": "UTF-8", "text": "# Get camera images by independ thread\nimport cv2\nfrom threading import Thread\nimport time\n\n\nclass CameraStream:\n\n\tdef __init__(self, source=0, name=\"CameraStream\"):\n\t\t# Start camera and read one frame\n\t\tself.cap = cv2.VideoCapture(source+cv2.CAP_DSHOW)\n\t\tself.ret, self.frame = self.cap.read()\n\t\tself.name = name\n\t\tself.fps_stream=0\n\t\tself.stop = False\n\t\tself.finished = False\n\n\n\tdef start(self):\n\t\t# Start the update trhead\n\t\tt = Thread(target=self.update, name=self.name, args=())\n\t\tt.start()\n\t\treturn self\n\n\tdef 
update(self):\n\t\t# Start de imread loop and measure read FPS every n cicles\n\t\tcount=0\n\t\tcicles=20\n\t\twhile self.stop == False:\n\t\t\tif count==0:\n\t\t\t\tinicio=time.perf_counter()\n\t\t\tself.ret, self.frame = self.cap.read()\n\t\t\tif self.ret==False:\n\t\t\t\tprint('No frame received from camera, thread stopped')\n\t\t\t\tbreak\n\t\t\tcount = count+1\n\t\t\tif count==cicles:\n\n\t\t\t\tend=time.perf_counter()\n\t\t\t\telapsed=end-inicio\n\n\t\t\t\tself.fps_stream=int((1/elapsed)*cicles)\n\t\t\t\t#print(count, cicles, elapsed,self.fps_stream)\n\t\t\t\tcount=0\n\t\tself.cap.release()\n\t\tself.finished=True\n\t\t\t\n\t\treturn\t\t\t\n\t\t\t\n\tdef read(self):\n\t\t# Return last readed frame and \"ret\" for validate.\n\t\treturn self.ret,self.frame\n\n\tdef release(self):\n\t\t# Stop the thread\n\t\tself.stop = True\n\n\tdef set(self,prop,value):\n\t\t# Property set\n\t\tself.cap.set(prop,value)\n\n\tdef get(self,prop):\n\t\t# Property get\n\t\tvalue = self.cap.get(prop)\n\t\treturn(value)\n\nif __name__ == \"__main__\":\n\t#Test\n\tcap=CameraStream()\n\t# fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n\t# cap.set(cv2.CAP_PROP_FOURCC, fourcc)\n\t# cap.set(cv2.CAP_PROP_FPS,60)\n\tcap.start()\n\twhile True:\n\t\tret,img = cap.read()\n\t\tif ret:\n\t\t\tcv2.putText(img,'FPS:'+str(cap.fps_stream),(50,50),cv2.FONT_ITALIC,2,(0,255,0))\n\t\t\tcv2.imshow('Test window, press \"q\" to close window',img)\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\tbreak\t\n\t\telse:\n\t\t\tprint('Failed to retrieve frame from camera, exiting.....')\n\t\t\tbreak\n\tcap.release()\n\tprint('Test finished')\n" } ]
2
SainadhPuriparthi/git-test
https://github.com/SainadhPuriparthi/git-test
22f42e0edd1a6b733070d8d2918715619cec7291
45a3adc7aac1d21d8c78e98f9dce0a9d35ffdddb
b7e7acde1f9ae1d5130b1b0917823310836abc9a
refs/heads/master
2020-03-30T16:27:15.775337
2018-10-03T13:04:16
2018-10-03T13:04:16
151,409,704
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5245097875595093, "alphanum_fraction": 0.5539215803146362, "avg_line_length": 16.545454025268555, "blob_id": "5e71d15791de7e38534ce62a56307ff37ab6238d", "content_id": "d14279cc94553e940cddc4009432c85bee75830c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 85, "num_lines": 11, "path": "/1.py", "repo_name": "SainadhPuriparthi/git-test", "src_encoding": "UTF-8", "text": "\"\"\"\r\n\"\"\"\r\ns=[\"open source\",\"libraries\",\"portal\",\"object oriented\",\"embededable\",\"easy to larn\"]\r\n\r\n#str1 = 'Bangalore'\r\n \r\ny = []\r\nfor str1 in s:\r\n print str1[0]\r\n y.append(str1[0])\r\nprint(y)\r\n" } ]
1
Nandan-M-Hegde/Recommender-System
https://github.com/Nandan-M-Hegde/Recommender-System
fd7eea7738f458826aec29b0eba316a894eacfae
d9b3d02ba7afd0f7e3f9201672fb4a52f6975259
f3afe67c7d0978b2e43ae586eacbfafb88993204
refs/heads/master
2020-03-22T20:10:58.686567
2018-07-11T13:38:34
2018-07-11T13:38:34
140,579,079
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5714608430862427, "alphanum_fraction": 0.5773312449455261, "avg_line_length": 37.900901794433594, "blob_id": "1dc199c580ef8ed1d417d403504c88092b197cef", "content_id": "ef62694eb6539e1b9b8dd6fe0b6d6b95fa2cfe66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4429, "license_type": "no_license", "max_line_length": 112, "num_lines": 111, "path": "/Recommenders.py", "repo_name": "Nandan-M-Hegde/Recommender-System", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\nclass Popularity_Recommender:\r\n def __init__(self, train_data, user_id, item_id):\r\n self.train_data = train_data\r\n self.user_id = user_id\r\n self.item_id = item_id\r\n self.recommendations = None\r\n return\r\n\r\n def Create(self):\r\n train_data_grouped = self.train_data.groupby([self.item_id]).agg({self.user_id : 'count'}).reset_index()\r\n train_data_grouped.rename(columns={'user_id':'score'}, inplace=True)\r\n\r\n train_data_sort = train_data_grouped.sort_values([\"score\", self.item_id], ascending=[0,1])\r\n train_data_sort[\"Rank\"] = train_data_sort[\"score\"].rank(ascending=0, method=\"first\")\r\n\r\n self.recommendations = train_data_sort.head(10)\r\n return\r\n\r\n def Recommend(self, user_id):\r\n user_recommendations = self.recommendations\r\n user_recommendations['user_id'] = user_id\r\n\r\n cols = user_recommendations.columns.tolist()\r\n cols = cols[-1:] + cols[:-1]\r\n user_recommendations = user_recommendations[cols]\r\n\r\n return user_recommendations\r\n\r\nclass Similarity_Recommender:\r\n def __init__(self, train_data, user_id, item_id):\r\n self.train_data = train_data\r\n self.user_id = user_id\r\n self.item_id = item_id\r\n self.cooccurence_matrix = None\r\n self.songs_dict = None\r\n self.rev_songs_dict = None\r\n self.recommendations = None\r\n return\r\n\r\n def Get_User(self, user):\r\n user_data = self.train_data[self.train_data[self.user_id] == user]\r\n 
user = list(user_data[self.item_id].unique())\r\n return user\r\n\r\n def Get_Item(self, item):\r\n item_data = self.train_data[self.train_data[self.item_id] == item]\r\n item = set(item_data[self.user_id].unique())\r\n return item\r\n\r\n def Get_Unique_Data(self):\r\n return list(self.train_data[self.item_id].unique())\r\n\r\n def Construct_Cooccurrence_Matrix(self, user_songs, all_songs):\r\n users = []\r\n\r\n for a_song in user_songs:\r\n users.append(self.Get_Item(a_song))\r\n\r\n cooccurence_matrix = np.matrix(np.zeros(shape=(len(user_songs), len(all_songs))), float)\r\n\r\n for i in range(0, len(all_songs)):\r\n songs_i = self.train_data[self.train_data[self.item_id] == all_songs[i]]\r\n users_i = set(songs_i[self.user_id].unique())\r\n \r\n for j in range(0,len(user_songs)): \r\n users_j = users[j]\r\n users_intersection = users_i.intersection(users_j)\r\n \r\n if len(users_intersection) != 0:\r\n users_union = users_i.union(users_j)\r\n cooccurence_matrix[j,i] = float(len(users_intersection))/float(len(users_union))\r\n else:\r\n cooccurence_matrix[j,i] = 0\r\n \r\n self.cooccurence_matrix = cooccurence_matrix\r\n return\r\n\r\n def Generate_Top_Recommendations(self, user, user_songs, all_songs):\r\n print(\"Non-zero values in cooccurence_matrix :%d\" % np.count_nonzero(self.cooccurence_matrix))\r\n \r\n user_sim_scores = self.cooccurence_matrix.sum(axis=0)/float(self.cooccurence_matrix.shape[0])\r\n user_sim_scores = np.array(user_sim_scores)[0].tolist()\r\n \r\n sort_index = sorted(((e,i) for i,e in enumerate(list(user_sim_scores))), reverse=True)\r\n \r\n df = pd.DataFrame(columns=['user_id', 'song', 'score', 'rank'])\r\n \r\n rank = 1 \r\n for i in range(0, len(sort_index)):\r\n if ~np.isnan(sort_index[i][0]) and all_songs[sort_index[i][1]] not in user_songs and rank <= 10:\r\n df.loc[len(df)]=[user,all_songs[sort_index[i][1]],sort_index[i][0],rank]\r\n rank = rank+1\r\n \r\n if df.shape[0] == 0:\r\n print(\"The current user has no songs for training 
the item similarity based recommendation model.\")\r\n return -1\r\n \r\n return df\r\n\r\n def Recommend(self, user):\r\n user_songs = self.Get_User(user)\r\n print(\"No. of unique songs for the user: %d\" % len(user_songs))\r\n \r\n all_songs = self.Get_Unique_Data()\r\n print(\"No. of unique songs in the training set: %d\" % len(all_songs))\r\n \r\n self.Construct_Cooccurrence_Matrix(user_songs, all_songs)\r\n return self.Generate_Top_Recommendations(user, user_songs, all_songs)\r\n" }, { "alpha_fraction": 0.7833333611488342, "alphanum_fraction": 0.7833333611488342, "avg_line_length": 19, "blob_id": "42304544e6e68aa1eec506d210f62c0c8a9a031a", "content_id": "6547d7602dd67ee5860e80aae9d6a991499fbbc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 37, "num_lines": 3, "path": "/README.md", "repo_name": "Nandan-M-Hegde/Recommender-System", "src_encoding": "UTF-8", "text": "# Recommender-System\n\n A simple song recommender in python\n" }, { "alpha_fraction": 0.5239388942718506, "alphanum_fraction": 0.5324278473854065, "avg_line_length": 37.82432556152344, "blob_id": "5d047bd0a4b046461b2684d02eff88b3aa94d754", "content_id": "6e09a6e7a8b0fcfa74d89f3055ff1f295a759e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2945, "license_type": "no_license", "max_line_length": 107, "num_lines": 74, "path": "/main.py", "repo_name": "Nandan-M-Hegde/Recommender-System", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nimport Recommenders as Recommenders\r\n\r\nclass Recommender:\r\n def __init__(self):\r\n self.db = None\r\n self.song_grouped = None\r\n self.users = None\r\n self.songs = None\r\n self.test_data = None\r\n self.train_data = None\r\n self.Popularity_Model = None\r\n return\r\n\r\n def Load_Data(self):\r\n db1 = 
pd.read_csv(\"10000.txt\", sep='\\t', header=None, names=[\"user_id\", \"song_id\", \"listen_count\"])\r\n db2 = pd.read_csv(\"song_data.csv\")\r\n self.db = pd.merge(db1, db2.drop_duplicates([\"song_id\"]), on=\"song_id\", how=\"left\")\r\n return\r\n\r\n def Transform_Data(self):\r\n self.db = self.db.head(10000)\r\n self.db['song'] = self.db['title'].map(str) + \" - \" + self.db['artist_name']\r\n self.song_grouped = self.db.groupby([\"song\"]).agg({\"listen_count\": \"count\"}).reset_index()\r\n grouped_sum = self.song_grouped[\"listen_count\"].sum()\r\n self.song_grouped['percentage'] = self.song_grouped[\"listen_count\"].div(grouped_sum)*100\r\n self.song_grouped.sort_values([\"listen_count\", \"song\"], ascending = [0,1])\r\n return\r\n\r\n def Find_Unique(self):\r\n self.users = self.db[\"user_id\"].unique()\r\n self.songs = self.db[\"song\"].unique()\r\n return\r\n \r\n def Split_Data(self):\r\n self.train_data, self.test_data = train_test_split(self.db, test_size = 0.20, random_state=0)\r\n return\r\n\r\n def Create_Model(self):\r\n self.Popularity_Model = Recommenders.Popularity_Recommender(self.train_data, \"user_id\", \"song\")\r\n self.Popularity_Model.Create()\r\n\r\n def Get_Popular_Recommendations(self, user_id):\r\n if user_id<1 and user_id>=len(self.users):\r\n print(\"Enter valid user id\")\r\n return\r\n \r\n return self.Popularity_Model.Recommend(user_id)\r\n\r\n def Get_Similar_Recommendations(self, user_id):\r\n self.Similarity_Model = Recommenders.Similarity_Recommender(self.train_data, \"user_id\", \"song\")\r\n \r\n user_items = self.Similarity_Model.Get_User(user_id)\r\n print(\"------------------------------------------------------------------------------------\")\r\n print(\"Training data songs for the user userid: %s:\" % user_id)\r\n print(\"------------------------------------------------------------------------------------\")\r\n\r\n for user_item in user_items:\r\n print(user_item)\r\n\r\n 
print(\"----------------------------------------------------------------------\")\r\n print(\"Recommendation process going on:\")\r\n print(\"----------------------------------------------------------------------\")\r\n print(self.Similarity_Model.Recommend(self.users[user_id]))\r\n\r\nR = Recommender()\r\nR.Load_Data()\r\nR.Transform_Data()\r\nR.Find_Unique()\r\nR.Split_Data()\r\nR.Create_Model()\r\nR.Get_Similar_Recommendations(5)" } ]
3
ITGGot-Eliam-Lundberg/Uppgift---Ra-knare
https://github.com/ITGGot-Eliam-Lundberg/Uppgift---Ra-knare
4809cc9d1e5c1da86a0b32772c8cc3b66d376aa3
f6064293e8776a20a352216868faca84d501ba1e
907b31bbe62bc695807022800ec0453159e195c2
refs/heads/master
2021-01-17T22:06:52.482612
2015-09-28T06:56:49
2015-09-28T06:56:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.569420337677002, "alphanum_fraction": 0.5846928358078003, "avg_line_length": 47.83898162841797, "blob_id": "ce02bc3ca4bce3dcf5d4c6ad00c53ddb40758c22", "content_id": "138e1aec36f81d314a68a85620faed414ff54c7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5850, "license_type": "no_license", "max_line_length": 228, "num_lines": 118, "path": "/readme.md", "repo_name": "ITGGot-Eliam-Lundberg/Uppgift---Ra-knare", "src_encoding": "UTF-8", "text": "# Rรคknare #\n\nDen hรคr uppgiften gรฅr ut pรฅ att skriva ett program som returnerar en strรคng med tal frรฅn och med ett starttal till och med ett sluttal.\n\n## Bedรถmningsmatris ##\n\n### Planering ###\n\n| Fรถrmรฅgor | E \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t | C | A |\n|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---|---|\n| Aktivitetsdiagram och pseudokod | Du anvรคnder pseudokod och/eller aktivitetsdiagram fรถr att planera dina uppgifter utifrรฅn exempel, eller i samrรฅd med utbildaren. 
| Som fรถr E, men utan exempel eller handledning | |\n\n### Syntax och Teori ###\n| Fรถrmรฅgor | E \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| C | A |\n|------------------------------------------------|------------------------------------------------------------------------------|---|---|\n| Grundlรคggande syntax\t\t | Du kan redogรถra fรถr och anvรคnda programmeringssprรฅkets grundlรคggande syntax | | |\n| Villkor och IF-satser\t\t | Du kan redogรถra fรถr och anvรคnda villkor och IF-satser | | |\n| Loopar & iteration | Du kan redogรถra fรถr och anvรคnda loopar och iterera รถver listor | | |\n\n### Kodning och kodningsstil ###\n\n| Fรถrmรฅgor | E | C | A |\n|-----------------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------|------------------------------------------------|\n| Komplexitet\t\t\t\t\t\t\t\t\t| **Du kan skriva enkla program** | Du kan skriva lite mer avancerade program | Du kan skriva komplexa program\n| Sekventiell- & funktionsbaserad programmering | Du anvรคnder dig av sekventiell programmering och fรถrdefinerade funktioner | **Du skapar och anvรคnder enkla funktioner** | Du skapar mer komplexa funktioner |\n| Struktur\t\t \t\t\t\t | Du skriver kod som รคr delvis strukturerad, har en konsekvent kodningsstil och tydlig namngivning | Som fรถr E, men du skriver kod som รคr helt strukturerad | \t\t\t |\n| Felsรถkning | Du felsรถker pรฅ egen hand enkla syntaxfel | Som fรถr E, men systematiskt, och dessutom รคven kรถrtidsfel och programmeringslogiska fel | Som fรถr C, men med effektivitet \t |\n\n## Uppgiftsbeskrivning ##\n\nDen hรคr uppgiften gรฅr ut pรฅ att skriva ett program som returnerar alla tal frรฅn och med ett starttal till och med ett sluttal.\n\nDu skall skriva en funktion: `count` som tar tvรฅ parametrar (`start` och `stop`)\n\nFunktionen skall sen returnera ut en komma separerad strรคng med alla tal frรฅn och med starttalet till och med 
sluttalet.\n\n### Exempel ###\n\n#### Ruby ####\n\n\tcount(start: 3, stop: 5)\n\t#=> \"3,4,5\"\n\n\tcount(start: 2, stop: -5)\n\t#=> \"2,1,0,-1,-2,-3,-4,-5\"\n\t\n\tcount(start: 1, stop: 1)\n\t#=> \"1\"\n\n#### Python ####\n\n\tcount(start=3, stop=5 )\n\t>>> '3,4,5'\n\n\tcount(start: 2, stop: -5)\n\t>>> '2,1,0,-1,-2,-3,-4,-5'\n\t\n\tcount(start: 1, stop: 1)\n >>> '1'\n\n## Genomfรถrande ##\n\n### Versionshantering ###\n\nGรถr en `fork` av repot. Klona sen ner till din dator. Kom ihรฅg att checka in dina รคndringar och synka med GitHub.\n\n### Flรถdesschema ###\n\nInnan du bรถrjar koda ska du skapa ett flรถdesschema fรถr programmet.\nNรคr du kรคnner att du har ett fungerande flรถdesschema, be lรคraren att kolla pรฅ det.\n\n### Kodning ###\n\nProgrammet skall utvecklas med hjรคlp av testerna.\n\n##### Ruby #####\n\nKรถr `bundle install` fรถr att installera alla dependencies (och `rbenv rehash` om rspec inte redan var installerat)\n\nSkapa funktionen i `lib/count.rb`\n\nTesterna finns i `spec/count_test.rb`\n\nKรถr `ruby spec/count_test.rb` fรถr att kรถra testerna.\n\n##### Python #####\n\nSkapa funktionen i `lib/count.py`\n\nTesterna finns i `test/count_test.py`\n\nKรถr `nosetests --rednose` fรถr att kรถra testerna.\n\n## Tips och lรคnkar ##\n\n* Om du inte kan beskriva lรถsningen i ord kommer det vara sรฅ gott som omรถjligt att skapa ett flรถdesschema\n* Fundera pรฅ vilka variabler som behรถvs\n* Testa flรถdesschemat med hjรคlp av penna och papper\n\n### Ruby ###\n\nLรคs mer om villkor och if-satser i รถvning 27-30 och while-loopar i รถvning 33 i [*Learn Ruby the Hard Way*](http://ruby.learncodethehardway.org/book)\n\n* [Learn Ruby the Hard Way - Exercise 27: Memorizing Logic](http://ruby.learncodethehardway.org/book/ex27.html)\n* [Learn Ruby the Hard Way - Exercise 28: Boolean Practice](http://ruby.learncodethehardway.org/book/ex28.html)\n* [Learn Ruby the Hard Way - Exercise 29: What If](http://ruby.learncodethehardway.org/book/ex29.html)\n* [Learn Ruby the Hard 
Way - Exercise 30: Else and If](http://ruby.learncodethehardway.org/book/ex30.html)\n* [Learn Ruby the Hard Way - Exercise 33: While loops](http://ruby.learncodethehardway.org/book/ex33.html)\n\n### Python ###\n\nLรคs mer om villkor och if-satser i รถvning 27-30 och while-loopar i รถvning 33 i [*Learn Python the Hard Way*](http://learnpythonthehardway.org)\n\n* [Learn Python the Hard Way - Exercise 27: Memorizing Logic](http://learnpythonthehardway.org/book/ex27.html)\n* [Learn Python the Hard Way - Exercise 28: Boolean Practice](http://learnpythonthehardway.org/book/ex28.html)\n* [Learn Python the Hard Way - Exercise 29: What If](http://learnpythonthehardway.org/book/ex29.html)\n* [Learn Python the Hard Way - Exercise 30: Else and If](http://learnpythonthehardway.org/book/ex30.html)\n* [Learn Python the Hard Way - Exercise 33: While loops](http://learnpythonthehardway.org/book/ex33.html)" }, { "alpha_fraction": 0.572519063949585, "alphanum_fraction": 0.6450381875038147, "avg_line_length": 16.5, "blob_id": "b5ffdd8475684906915c867dea70030183470db2", "content_id": "7467412c9a2d2fb2e4c8661eb4d9f1c9e4f8ff04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 66, "num_lines": 30, "path": "/test/count_test.py", "repo_name": "ITGGot-Eliam-Lundberg/Uppgift---Ra-knare", "src_encoding": "UTF-8", "text": "#encoding: utf-8\n\nfrom nose.tools import *\nimport sys\nsys.path.append('../lib')\n\nfrom count import count\n\n\n\n\n@raises(TypeError)\ndef test_count_takes_two_argument():\n count()\n count(start=3)\n\n\ndef test_count_from_3_to_5_should_give_3_4_5():\n\n assert_equal(count(start=3,stop=5), \"3,4,5\")\n\n\ndef test_count_from_2_to_negative_5_should_give_2_1_0_1_2_3_4_5():\n\n assert_equal(count(start=2, stop=-5), \"2,1,0,-1,-2,-3,-4,-5\")\n\n\ndef test_count_from_1_to_1_should_give_1():\n\n assert_equal(count(start=1, stop=1), \"1\")" }, { "alpha_fraction": 
0.5900962948799133, "alphanum_fraction": 0.6409903764724731, "avg_line_length": 25.962963104248047, "blob_id": "438687a23e244cd13d8063df494d91c50a98a079", "content_id": "28ceaa1421143a4d66d3feddbf77e2ec91c98582", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 727, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/spec/count_test.rb", "repo_name": "ITGGot-Eliam-Lundberg/Uppgift---Ra-knare", "src_encoding": "UTF-8", "text": "require 'minitest'\nrequire 'minitest/autorun'\nrequire 'minitest/reporters'\nrequire_relative '../lib/count'\n\nMinitest::Reporters.use! [Minitest::Reporters::SpecReporter.new]\n\ndescribe 'count' do\n\n it 'takes take start and stop as arguments' do\n proc { count() }.must_raise ArgumentError\n proc { count(stop:5) }.must_raise ArgumentError\n end\n\n it 'returns \"3,4,5\" if start is 3 and stop is 5' do\n count(start:3,stop:5).must_equal '3,4,5'\n end\n\n it 'returns \"2,1,0,-1,-2,-3,-4.-5\" if start is 2 and stop is -5' do\n count(start:2,stop:-5).must_equal '2,1,0,-1,-2,-3,-4,-5'\n end\n\n it 'should return \"1\" if start is 1 and stop is 1' do\n count(start:1,stop:1).must_equal '1'\n end\n\nend" } ]
3
Ludaxord/PythonTools
https://github.com/Ludaxord/PythonTools
fd6fdee5a21ac29c678836c1c764175eee4e5165
45e64d85d2ebb7d695538f9c3f3f3796ff77bcb9
0fa3de7dd1ec8869252f5a48c10c00930b6dd146
refs/heads/master
2020-12-30T08:37:18.308639
2020-04-13T20:32:13
2020-04-13T20:32:13
238,930,763
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5812743902206421, "alphanum_fraction": 0.5864759683609009, "avg_line_length": 31.04166603088379, "blob_id": "366cadd77add803faf67a13bb366a78469f29349", "content_id": "81278ea801b7deeb8ac9c369c5997e8e3a157b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1538, "license_type": "no_license", "max_line_length": 81, "num_lines": 48, "path": "/web/youtube_downloader.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import time\n\nimport youtube_dl\nfrom moviepy.editor import *\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nclass YoutubeDownloader:\n\n def get_videos_and_convert(self, urls, file_path, audio_path):\n for url in urls:\n print(f\"starting download... {url}\")\n successful = False\n while not successful:\n try:\n self.get_video_with_youtube_dl(url, file_path)\n successful = True\n except Exception as e:\n print(f\"problem with downloading taking sleep, {e}\")\n time.sleep(1000)\n\n files = self.get_videos_list_from_dir(file_path)\n for file in files:\n self.convert_mp4_to_mp3(f\"{file_path}/{file}\", audio_path)\n\n def convert_mp4_to_mp3(self, movie_path, audio_path):\n video = VideoFileClip(movie_path)\n video.audio.write_audiofile(audio_path)\n\n def get_video_with_youtube_dl(self, url, file_path, urls=None):\n ydl_opts = {\n 'outtmpl': os.path.join(file_path, '%(title)s-%(id)s.%(ext)s'),\n }\n if urls is None:\n urls = [url]\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download(urls)\n\n def get_videos_list_from_dir(self, path):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n return files\n\n# Usage\n# urls = []\n# main_path = \"\"\n# youtube_downloader = YoutubeDownloader()\n# youtube_downloader.get_video_with_youtube_dl(None, f\"{main_path}/videos\", urls)\n" }, { "alpha_fraction": 0.5735909938812256, "alphanum_fraction": 0.5764895081520081, "avg_line_length": 28.855770111083984, "blob_id": 
"d8a494209f39c89818d7047c4bdf074a1e1bf3e1", "content_id": "f85492b21b85276d8345510ced0d4ec723933819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3105, "license_type": "no_license", "max_line_length": 109, "num_lines": 104, "path": "/network/nord_vpn_connect.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport io\nimport random\nimport sched\nimport time\nfrom subprocess import Popen, PIPE\n\nimport psutil\n\nfrom utils.arg_class import ArgClass\nfrom utils.parser import Parser\n\n\nclass NordVpnConnect(ArgClass):\n pid = -1\n country_code = None\n s = sched.scheduler(time.time, time.sleep)\n\n def __init__(self, countrycode=None):\n self.country_code = countrycode\n\n def __args(self):\n return Parser(args=[\n {\"command\": \"--country_code\", \"type\": str, \"help\": \"set initial country code to connect\"},\n {\"command\": \"--change_time\", \"type\": int, \"help\": \"set time to change connection server\"}\n ]).get_args()\n\n def get_args(self):\n args = self.__args()\n countrycode = args.country_code\n change_time = args.change_time\n return countrycode, change_time\n\n def connect_to_open_pyn(self):\n print(\"-------------- connecting to open pyn --------------\")\n openpyn_init = Popen(['sudo', 'openpyn', '--init'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n openpyn_init.communicate()\n time.sleep(2)\n\n def get_country_code(self, file_name=None):\n if file_name is None:\n file_name = f\"{self.get_current_dir_path()}/country.txt\"\n l = list()\n with io.open(file_name, 'r', encoding='utf-8') as f:\n for line in f:\n if line != '\\n':\n l.append(line.replace(\"\\n\", \"\"))\n return l\n\n def get_random_country_code(self):\n random_list = self.get_country_code()\n random_code = random.choice(random_list)\n return random_code\n\n def connect_to_nord_vpn(self, command='openpyn', country='pl'):\n print(\"-------------- connecting to nord 
vpn --------------\")\n openpyn_output = Popen([command, country])\n pid = openpyn_output.pid\n openpyn_output.communicate()\n\n def run(self):\n self.kill_nord_vpn()\n if self.country_code is None:\n random_value = self.get_random_country_code()\n else:\n random_value = self.country_code\n print(random_value)\n self.connect_to_nord_vpn(country=random_value)\n\n def main(self, countrycode=None):\n if self.country_code is None:\n self.country_code = countrycode\n self.run()\n\n def kill_nord_vpn(self):\n if self.pid != -1:\n print(f\"killing process on pid {self.pid}\")\n p = psutil.Process(self.pid)\n p.kill()\n else:\n print(f\"there are no running process on pid {self.pid}\")\n\n\nnord_vpn = NordVpnConnect()\n\ncountry_code, change_time = nord_vpn.get_args()\n\n\ndef run_hourly(sc):\n nord_vpn.main(country_code)\n print(\"running nordvpn hourly\")\n s.enter(60, 1, run_hourly, (sc,))\n\n\nprint(f\"-------------- country code => {country_code} --------------\")\nprint(f\"-------------- change time => {change_time} --------------\")\n\nif change_time is not None:\n s = sched.scheduler(time.time, time.sleep)\n s.enter(change_time, 1, run_hourly, (s,))\n s.run()\nelse:\n nord_vpn.main(country_code)\n" }, { "alpha_fraction": 0.6081081032752991, "alphanum_fraction": 0.6081081032752991, "avg_line_length": 16.41176414489746, "blob_id": "cc7e6004a91cd319405bc98b7d3a09c76f311a62", "content_id": "b5bee4bd18e2ecc4ceefaff1c01d1cbca655a3d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 55, "num_lines": 17, "path": "/utils/arg_class.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import pathlib\nfrom abc import ABC\n\n\nclass ArgClass(ABC):\n\n def get_file_path(self):\n return pathlib.Path(__file__).parent.absolute()\n\n def get_current_dir_path(self):\n return pathlib.Path().absolute()\n\n def get_args(self):\n pass\n\n def 
__args(self):\n pass\n" }, { "alpha_fraction": 0.7305428385734558, "alphanum_fraction": 0.7338129281997681, "avg_line_length": 28.403846740722656, "blob_id": "2e30d7a358ff4415edf29fdcbc9478b6ebaf2515", "content_id": "d96e0e6831b0d57d409efd1ecf7e37fe1451e23b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1529, "license_type": "no_license", "max_line_length": 162, "num_lines": 52, "path": "/README.md", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "# Python Tools\nUseful tools for work automation written in Python\n\n# web\n## `curl_call.py`\n## `firebase_messaging_call.py`\n\n# utils\n## `arg_class.py`\n## `parser.py`\n\n# network\n## `nord_vpn_connect.py`\n\n# hardware\n## `cpu_stress.py`\n## `ram_clean.py`\n## `sys_details.py`\n\n# filesystem\n \n## `meta_fader.py`\nSimple library to remove and display MetaData of Image.\n### Pre-requirements:\n* Python (Tested on Python 3.6)\n* CMD/Terminal\n### Usage\n`python filesystem/meta_fader.py --path <path_to_file> --new_path <path_to_new_file>`\n### Docs\n### MetaFader\nMetaFader defines main logic of library. As a constructor it except (required) string argument with path of file user want to scan.\n\n`get_meta(image)` - create dictionary with exif metadata. Except required argument with path to file.\n\n`meta_display()` - create dictionary with exif metadata. If user do not pass any arguments it takes argument from constructor. Additional Arguments: path to file.\n\n`meta_remove()` - create new file with removed exif metadata. Only required argument is save_file. To use if user want to save new file without exif data.\n\n### Parser\n\nParser defines terminal arguments required to run script. \n\n`meta_display_args()` - create argument list:\n\n* path - to define path to scan file.\n* new_path - to define new path to file with removed metadata.\n\n# All tools setup\n\nAll script using python 3.6.5. 
It was not tested with any other versions of python.\n\nNOTE: before running any of scripts install all packages with `pip` using `requirements.txt` file.\n" }, { "alpha_fraction": 0.8607594966888428, "alphanum_fraction": 0.8734177350997925, "avg_line_length": 7.777777671813965, "blob_id": "ee36b9b87da8b49254c118e407b07ebd5b8aa3e7", "content_id": "60f9abd796871d92afa78dbeda97a12ae409d4d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 79, "license_type": "no_license", "max_line_length": 14, "num_lines": 9, "path": "/requirements.txt", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "beautifulsoup4\nyoutube_dl\nmoviepy\npytube\npandas\nPillow\ngpuinfo\npsutil\nrequests\n" }, { "alpha_fraction": 0.559335470199585, "alphanum_fraction": 0.5628955960273743, "avg_line_length": 29.095237731933594, "blob_id": "a92245973822d3a781dc0087cf22147bff33be2d", "content_id": "b4dd66e596fb2f0d8968e9fc50a69d1c4ae88e5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2528, "license_type": "no_license", "max_line_length": 99, "num_lines": 84, "path": "/filesystem/meta_fader.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom PIL import Image, ExifTags\n\nfrom utils.arg_class import ArgClass\nfrom utils.parser import Parser\n\n\nclass MetaFader(ArgClass):\n file_path = None\n\n def __init__(self, file_path=None):\n if file_path is not None:\n self.file_path = file_path\n\n def __args(self):\n return Parser(args=[{\"command\": \"--path\", \"type\": str, \"help\": \"full path to image file\"},\n {\"command\": \"--new_path\", \"type\": str,\n \"help\": \"full path to new image file\"}]).get_args()\n\n def get_args(self):\n args = self.__args()\n path = args.path\n new_file_path = args.new_path\n return path, new_file_path\n\n def meta_display(self, file_path=None):\n if file_path is None:\n 
file_path = self.file_path\n image = Image.open(file_path)\n exif = self.get_meta(image)\n return exif\n\n def get_meta(self, image):\n try:\n exif = {ExifTags.TAGS[k]: v for k, v in image._getexif().items() if k in ExifTags.TAGS}\n except Exception as e:\n print(f\"{e}\")\n exif = {}\n return exif\n\n def decode_maker_note(self, exif):\n maker_note = exif.get(\"MakerNote\")\n maker_notes = [maker_note[i:i + 1] for i in range(0, len(maker_note), 1)]\n encodings = ['utf-8', 'utf-16', 'ascii', 'base64']\n print(f\"encoding {encodings}\")\n decoded = []\n for encoding in encodings:\n decoded_note = \"\"\n for note in maker_notes:\n try:\n decoded_note += note.decode(encoding)\n except Exception as e:\n exception = e\n decoded.append(decoded_note)\n return decoded\n\n def meta_remove(self, save_file, file_path=None, new_file_path=None):\n if file_path is None:\n file_path = self.file_path\n image = Image.open(file_path)\n data = list(image.getdata())\n removed_data_image = Image.new(image.mode, image.size)\n removed_data_image.putdata(data)\n if save_file:\n if new_file_path is None:\n new_file_path = file_path\n removed_data_image.save(new_file_path)\n return removed_data_image\n\n\nmeta = MetaFader()\n\npath, new_file_path = meta.get_args()\n\nexif = meta.meta_display(file_path=path)\n\nprint(exif)\n\nnew_image = meta.meta_remove(True, new_file_path=new_file_path)\n\nnew_exif = meta.meta_display(file_path=new_file_path)\n\nprint(new_exif)\n" }, { "alpha_fraction": 0.5202527046203613, "alphanum_fraction": 0.5336306095123291, "avg_line_length": 30.29069709777832, "blob_id": "94bad8b6a8b37aae69faf4dd0395762f47d51ad1", "content_id": "8793346b9886cc87e2903986ee8171a4cba93761", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2691, "license_type": "no_license", "max_line_length": 68, "num_lines": 86, "path": "/hardware/sys_details.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import 
os\nimport platform\nimport re\nimport subprocess\nimport sys\n\nimport psutil\nfrom gpuinfo import GPUInfo\n\n\nclass SysDetails:\n\n def operating_system_info(self, with_print=False):\n operating_system = sys.platform\n if with_print:\n print(f\"Operating system: {operating_system}\")\n if operating_system.lower() == \"darwin\":\n cmd = \"system_profiler SPSoftwareDataType\"\n stream = os.popen(cmd)\n output = stream.read()\n if with_print:\n print(output)\n return operating_system\n\n def cpu_name(self):\n processor = platform.processor()\n print(f\"CPU name: {processor}\")\n return processor\n\n def usage(self):\n try:\n cpu = psutil.cpu_percent()\n memory = dict(psutil.virtual_memory()._asdict())\n memory[\"total_gb\"] = memory[\"total\"] / 1024 ** 3\n memory[\"available_gb\"] = memory[\"available\"] / 1024 ** 3\n memory[\"used_gb\"] = memory[\"used\"] / 1024 ** 3\n memory[\"free_gb\"] = memory[\"free\"] / 1024 ** 3\n memory[\"active_gb\"] = memory[\"active\"] / 1024 ** 3\n memory[\"inactive_gb\"] = memory[\"inactive\"] / 1024 ** 3\n memory[\"wired_gb\"] = memory[\"wired\"] / 1024 ** 3\n print(f\"cpu usage {cpu}\")\n print(f\"memory usage {memory}\")\n return cpu, memory\n except Exception as e:\n print(e)\n\n def gpu_info(self):\n try:\n gpu = GPUInfo.check_empty()\n print(f\"GPU name: {gpu}\")\n return gpu\n except Exception as e:\n print(e)\n\n def cpu_name_by_command(self):\n system = self.operating_system_info().lower()\n if system == \"windows\":\n name = self.cpu_name()\n print(name)\n return name\n elif system == \"darwin\":\n cmd = \"sysctl -n machdep.cpu.brand_string\"\n stream = os.popen(cmd)\n output = stream.read()\n print(output)\n return output\n elif system == \"linux\":\n cmd = \"cat /proc/cpuinfo\"\n info = subprocess.check_output(cmd, shell=True).strip()\n for line in info.split(\"\\n\"):\n if \"model name\" in line:\n name = re.sub(\".*model name.*:\", \"\", line, 1)\n print(name)\n return name\n else:\n name = self.cpu_name()\n 
print(name)\n return name\n\n\nsystem_details = SysDetails()\nsystem_details.operating_system_info(with_print=True)\nsystem_details.cpu_name()\nsystem_details.gpu_info()\nsystem_details.usage()\nsystem_details.cpu_name_by_command()\n" }, { "alpha_fraction": 0.5360065698623657, "alphanum_fraction": 0.5761047601699829, "avg_line_length": 30.33333396911621, "blob_id": "67121498905e08775ef458d3910be0129d32b11a", "content_id": "3266596095c3f46a10666441e9f7083d20a7c6e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 105, "num_lines": 39, "path": "/preprocessing/preprocess_data.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom utils.arg_class import ArgClass\n\n\nclass PreProcessData(ArgClass):\n encodings = ['utf-8', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256', 'cp1257',\n 'cp1258', 'iso8859_15', 'iso8859_16']\n\n def __init__(self, filename):\n self.file_name = filename\n pass\n\n def get_encoded_datasets(self, encodings=None):\n accepted_datasets = []\n if encodings is None:\n encodings = self.encodings\n for encoding in encodings:\n try:\n dataset = self.get_dataset(encoding)\n accepted_datasets.append(dataset)\n except Exception as e:\n print(e)\n return accepted_datasets\n\n def get_accepted_encodings(self):\n accepted_encodings = []\n for encoding in self.encodings:\n try:\n self.get_dataset(encoding)\n accepted_encodings.append(encoding)\n except Exception as e:\n print(e)\n return accepted_encodings\n\n def get_dataset(self, encoding):\n filename = self.file_name\n dataset = pd.read_csv(filename, delimiter=';', encoding=encoding)\n return dataset\n" }, { "alpha_fraction": 0.546543300151825, "alphanum_fraction": 0.5487207174301147, "avg_line_length": 30.672412872314453, "blob_id": "988660e52e1f0c64230d0753c668d3ab7e5cc5fe", "content_id": 
"717e14fa9344a34d66915f551d59f90779a89144", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1837, "license_type": "no_license", "max_line_length": 115, "num_lines": 58, "path": "/db/postgre.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import os\n\nimport psycopg2\nfrom psycopg2.extras import RealDictCursor\n\n\nclass Postgre:\n def __init__(self, host, database, user, password):\n self.host = host\n self.database = database\n self.user = user\n self.password = password\n\n def remove_file(self, filename):\n try:\n os.remove(filename)\n except OSError:\n pass\n\n def connect(self, with_query=None, with_factory=False):\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n conn = None\n try:\n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(host=self.host, database=self.database, user=self.user, password=self.password)\n\n # create a cursor\n if with_factory:\n cur = conn.cursor(cursor_factory=RealDictCursor)\n else:\n cur = conn.cursor()\n\n # execute a statement\n if with_query is not None:\n cur.execute(with_query)\n # return cur\n else:\n print('PostgreSQL database version:')\n cur.execute('SELECT version()')\n # display the PostgreSQL database server version\n db_version = cur.fetchone()\n print(db_version)\n # close the communication with the PostgreSQL\n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n # if with_query is None:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n\n def save_to_sql(self, filename, filedata):\n with open(filename, 'a') as file:\n file.write(filedata)\n file.close()\n" }, { "alpha_fraction": 0.5472519993782043, "alphanum_fraction": 0.5475871562957764, "avg_line_length": 30.744680404663086, "blob_id": "9d46f0d7d6e4d61e6b009dec8f53fb94a212b7cd", "content_id": "ac4b36b1ce19b4262f1b17635aa00ac424b9657d", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2984, "license_type": "no_license", "max_line_length": 102, "num_lines": 94, "path": "/web/firebase_messaging_call.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport json\n\nfrom utils.arg_class import ArgClass\nfrom web.curl_call import CurlCall\nfrom utils.parser import Parser, StoreDictKeyPair\n\n\nclass FirebaseMessagingCall(ArgClass):\n\n def __load_config(self):\n with open(f'{self.get_current_dir_path()}/config.json') as json_file:\n data = json.load(json_file)\n return data\n\n def __get_server_key(self):\n data = self.__load_config()\n return data[\"server_key\"]\n\n def __args(self):\n return Parser(\n args=[{\"command\": \"--to\", \"type\": str,\n \"help\": \"Firebase device token\"},\n {\"command\": \"--data\", \"action\": StoreDictKeyPair,\n \"help\": \"key value pair of data in notification, add keys like: title, body, url\"},\n {\"command\": \"--headers\", \"action\": StoreDictKeyPair,\n \"help\": \"headers send with request to firebase, this value is optional\"},\n {\"command\": \"--title\", \"type\": str,\n \"help\": \"title of notification\"},\n {\"command\": \"--body\", \"type\": str,\n \"help\": \"body of notification\"},\n {\"command\": \"--url\", \"type\": str,\n \"help\": \"url to open send with notification\"}\n ]).get_args()\n\n def __get_headers(self, arguments):\n server_key = self.__get_server_key()\n header = arguments.headers\n if header is None:\n header = {\n 'Authorization': f'key={server_key}',\n 'Content-Type': 'application/json'\n }\n return header\n\n def __get_notification(self, arguments):\n notification = {}\n title = arguments.title\n body = arguments.body\n notification[\"title\"] = title\n notification[\"body\"] = body\n return notification\n\n def __get_data(self, arguments):\n data = {}\n title = arguments.title\n body = arguments.body\n url = arguments.url\n 
data[\"title\"] = title\n data[\"body\"] = body\n data[\"url\"] = url\n return data\n\n def get_args(self):\n arguments = self.__args()\n payload = {}\n headers = self.__get_headers(arguments)\n to = arguments.to\n data = arguments.data\n url = arguments.url\n payload[\"to\"] = to\n if data is not None:\n payload[\"data\"] = data\n if data is None and url is None:\n notification = self.__get_notification(arguments)\n payload[\"notification\"] = notification\n elif data is None and url is not None:\n data = self.__get_data(arguments)\n payload[\"data\"] = data\n return headers, payload\n\n\nfirebase = FirebaseMessagingCall()\n\nheaders, payload = firebase.get_args()\n\nprint(f\"headers => {headers}\")\nprint(f\"payload => {payload}\")\n\nURL = 'https://fcm.googleapis.com/fcm/send'\n\ncurl = CurlCall()\n\nreq = curl.curl_call(URL, payload, headers)\n" }, { "alpha_fraction": 0.7636363506317139, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 12.75, "blob_id": "26ab7c4777f581c90c56af129e68f77c54d60aed", "content_id": "3e7a043b778604a312174423fcc5e3abec2ea1fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/hardware/ram_clean.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import gc\n\ncollection = gc.collect()\nprint(collection)\n" }, { "alpha_fraction": 0.6378600597381592, "alphanum_fraction": 0.6378600597381592, "avg_line_length": 23.299999237060547, "blob_id": "67233d1f8517fc650aff1ddae5a54572402ba627", "content_id": "91e5aee4a1e3583068790fb1a0e1fc011dd34817", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/web/curl_call.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import requests\n\n\nclass CurlCall:\n\n def 
curl_call(self, url, payload, headers, display_result=True):\n r = requests.post(url=url, json=payload, headers=headers)\n if display_result:\n print(r.content)\n return r\n" }, { "alpha_fraction": 0.5383057594299316, "alphanum_fraction": 0.5396557450294495, "avg_line_length": 44.58461380004883, "blob_id": "6e3e65bc595018d04c804e1526fb3032bad7b34b", "content_id": "788ad4184e880b31d3df35e55934bfe325e544e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2963, "license_type": "no_license", "max_line_length": 172, "num_lines": 65, "path": "/web/scrap_page.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\n\nfrom db.postgre import Postgre\nfrom web.curl_call import CurlCall\n\n\nclass ScrapPage:\n def __init__(self, url):\n self.url = url\n self.res = self.get_page()\n\n def get_page(self, url=None):\n curl = CurlCall()\n if url is None:\n url = self.url\n res = curl.curl_call(url, payload=None, headers=None, display_result=False)\n return res\n\n def get_beauty_page(self, res=None):\n if res is None:\n res = self.res.content\n html = BeautifulSoup(res, 'html.parser')\n return html\n\n @staticmethod\n def find_counties_locations():\n scrap_page = ScrapPage(\"https://www.worldatlas.com/webimage/countrys/namerica/us.htm\")\n html = scrap_page.get_beauty_page()\n page_hrefs = list()\n for a in html.select('a'):\n href: str = a[\"href\"]\n if \"/webimage/countrys/namerica/usstates/\" in href and \"https://www.worldatlas.com/\" not in href:\n href = href.replace(\".htm\", \"latlog.htm\")\n page_hrefs.append(f\"https://www.worldatlas.com{href}\")\n return page_hrefs\n\n def get_counties_locations(self):\n location_hrefs = ScrapPage.find_counties_locations()\n sql_file = \"/Volumes/LaCie/ProjectSup/PythonTools/resources/sqls/states_location.sql\"\n postgre = Postgre(\"localhost\", \"county\", \"postgres\", \"\")\n postgre.remove_file(sql_file)\n location_hrefs = 
list(set(location_hrefs))\n for i, location_href in enumerate(location_hrefs):\n print(f\"{location_href} => {i} of {len(location_hrefs)}\")\n try:\n res = self.get_page(location_href)\n page = self.get_beauty_page(res.content)\n sections = page.find_all(\"section\", class_=\"mapContLv2-left\")\n for section in sections:\n try:\n print(\"----------section-----------\")\n section.find(\"ul\").clear()\n clear_section = section.text.replace(\"LATITUDE & LONGITUDE:\", \"\").replace(\n \"RELATIVE LOCATION:\", \"\").lstrip()\n state: str = clear_section.rpartition(' Location of')[0]\n description = clear_section.split(\"Location of\")[1].lstrip()\n description: str = description.replace(\"'\", \"''\").replace(\"hemispheres\", \"\")\n sql = f\"INSERT INTO states_location(state, description, url) VALUES ('{state.lower()}', '{description}', '{location_href}') ON CONFLICT DO NOTHING;\"\n print(sql)\n postgre.save_to_sql(sql_file, sql + \"\\n\")\n print(\"----------------------------\")\n except Exception as e:\n print(f\"error in exception handler {e}\")\n except Exception as e:\n print(f\"error while scrapping, {e}\")\n" }, { "alpha_fraction": 0.5627009868621826, "alphanum_fraction": 0.5723472833633423, "avg_line_length": 19.064516067504883, "blob_id": "dc140f47c6730abd69a2fa79e989a046d28afeaf", "content_id": "572b4ae5ef55ab31af0328b00838a4f553962872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 51, "num_lines": 31, "path": "/hardware/cpu_stress.py", "repo_name": "Ludaxord/PythonTools", "src_encoding": "UTF-8", "text": "import signal\nfrom multiprocessing import cpu_count, Pool\n\n\nclass CPUStress:\n\n def __init__(self):\n self.stop_loop = 0\n\n def exit_child(self, x, y):\n self.stop_loop = 1\n\n def f(self, x):\n while not self.stop_loop:\n x * x\n\n def run(self):\n processes = cpu_count()\n print('-' * 20)\n print('Running load on CPU(s)')\n 
print('Utilizing %d cores' % processes)\n print('-' * 20)\n pool = Pool(processes)\n pool.map(self.f, range(processes))\n\n\ncpu_stress = CPUStress()\n\nsignal.signal(signal.SIGINT, cpu_stress.exit_child)\n\ncpu_stress.run()\n" } ]
14
SungPyo-LEE/NBA_Flask
https://github.com/SungPyo-LEE/NBA_Flask
0aec78217d04bb68d47f4dc4d1ea03cc2cb5c05e
fa54fad22c39bfd077dba236764b786011e71274
668db4d85bcfa94960eab8f6ce06b5b2d1e12549
refs/heads/master
2023-06-25T11:07:27.927891
2021-07-10T11:23:34
2021-07-10T11:23:34
222,816,819
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.604651153087616, "alphanum_fraction": 0.6279069781303406, "avg_line_length": 27.66666603088379, "blob_id": "d23f924e6050cdcd3402b8ddd6f071061e0c92a9", "content_id": "5ec65e5da463d1e13855b6e369f6fd08e7dea02a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 262, "license_type": "no_license", "max_line_length": 67, "num_lines": 6, "path": "/README.md", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "# NBA_chatbot_project\n\n### 1. DB์— NBA Team// ํŒ€๋ณ„ Roaster// Roaster ์•ˆ์— ์žˆ๋Š” ํ”Œ๋ ˆ์ด์–ด Stat // ์ง์ „ ๊ฒŒ์ž„\n### 2. AWS MySQL ์‚ฌ์šฉ, Flask ํ”„๋ ˆ์ž„์›Œํฌ ์ด์šฉํ•ด์„œ DB์™€ ์—ฐ๊ฒฐ\n### 3. ํ”„๋ก ํŠธ ํŽ˜์ด์ง€ ์—ฐ๋™\n### 4. ํŽ˜์ด์Šค๋ถ ์ฑ—๋ด‡ ์—ฐ๋™\n" }, { "alpha_fraction": 0.6275861859321594, "alphanum_fraction": 0.6348658800125122, "avg_line_length": 34.73239517211914, "blob_id": "703c5c6f9466c444213f8a678d5ce032009e570c", "content_id": "2e3989cf547487fe3ade562bfe0dc734b23c7f1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2658, "license_type": "no_license", "max_line_length": 121, "num_lines": 71, "path": "/view/__init__.py", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "from flask import request, jsonify, render_template, Flask, send_file, make_response\r\nfrom functools import wraps\r\n\r\n#get_graph_view\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom io import BytesIO, StringIO\r\n\r\nfrom functools import wraps, update_wrapper\r\nfrom datetime import datetime\r\napp = Flask(__name__)\r\ndef nocache(view):\r\n @wraps(view)\r\n def no_cache(*args, **kwargs):\r\n response = make_response(view(*args, **kwargs))\r\n response.headers['Last-Modified'] = datetime.now()\r\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\r\n response.headers['Pragma'] = 
'no-cache'\r\n response.headers['Expires'] = '-1'\r\n return response \r\n return update_wrapper(no_cache, view)\r\n\r\n\r\n\r\n\r\n\r\ndef create_endpoints(app,services):\r\n team_service = services.team_service\r\n\r\n @app.route(\"/\",methods=['GET'])\r\n def home():\r\n return render_template('home.html')\r\n\r\n @app.route(\"/game_log\",methods=['GET'])\r\n def game_log():\r\n results=team_service.team_roaster()\r\n return render_template('team_list.html', results=results)\r\n\r\n ###๊ฒŒ์ž„์•„์ด๋”” ๊ฐ€์ ธ์˜ค๊ธฐ\r\n @app.route(\"/game_log/<string:abbrev>\",methods=['GET'])\r\n def team_game_list(abbrev):\r\n games_list=team_service.get_gamelist(abbrev)\r\n return render_template('OKC_game_id.html', results=games_list, abbrev=abbrev)\r\n\r\n @app.route('/game_log/<string:abbrev>/<string:game_id>', methods=['GET'])\r\n @nocache\r\n def team_game_log(abbrev,game_id):\r\n tracker=team_service.get_game_log(game_id)\r\n tables=[tracker.to_html(classes='female')]\r\n return render_template('last_game_dataframe.html',title='Last game ON Portland', tables=tables, titles='saxycow',\r\n abbrev=abbrev,game_id=game_id)\r\n\r\n ##view_function\r\n @app.route('/game_log/<string:abbrev>/<string:game_id>/<string:stat>')\r\n @nocache\r\n def fig(abbrev,game_id,stat):\r\n last_game_tracker=team_service.get_game_log(game_id)\r\n plt.rcParams[\"figure.figsize\"] = (20,10)\r\n plt.rcParams['lines.linewidth'] = 2\r\n plt.rcParams['lines.color'] = 'r'\r\n plt.rcParams['axes.grid'] = True\r\n y=list(last_game_tracker[stat])\r\n x=list(last_game_tracker[\"PLAYER_NAME\"])\r\n plt.bar(x, y)\r\n plt.xlabel('Player_name', fontsize=18)\r\n plt.xticks(x, x, fontsize=15,rotation=45)\r\n img = BytesIO()\r\n plt.savefig(img, format='png', dpi=300)\r\n img.seek(0)## object๋ฅผ ์ฝ์—ˆ๊ธฐ ๋•Œ๋ฌธ์— ์ฒ˜์Œ์œผ๋กœ ๋Œ์•„๊ฐ€์คŒ\r\n return send_file(img, mimetype='image/png')\r\n\r\n" }, { "alpha_fraction": 0.7227723002433777, "alphanum_fraction": 0.7301980257034302, "avg_line_length": 
29.230770111083984, "blob_id": "aa82c8f147922cd240c8febbd91477692f728c63", "content_id": "8db6344c6dc83fa7c098a5be52338a9c00c5664d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/injury_service/nba_injury.py", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nhtml=urlopen(\"https://www.cbssports.com/nba/injuries/\")\r\n\r\npage=html.read() #์ด์ œ ์ด๋†ˆ์„ soup๋ฅผ ์ด์šฉํ•˜์—ฌ ํŒŒ์‹ฑํ•ด๋ผ.\r\nsoup =BeautifulSoup(page,'html.parser')\r\nchildren_node=list(soup.children)\r\ntext=soup.find_all('div',\"Page-colMain\")\r\n\r\nwhole_t_name=text[0].find_all('span',\"TeamName\")\r\nwhole_p_name=text[0].find_all('span',\"CellPlayerName--short\")" }, { "alpha_fraction": 0.5534725189208984, "alphanum_fraction": 0.560289740562439, "avg_line_length": 34.10769271850586, "blob_id": "30627921778c4ae24c63daa71b8d35e369b7ac7f", "content_id": "b8252f82d183c0cd77a3e1d684247356dcbb9835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2387, "license_type": "no_license", "max_line_length": 95, "num_lines": 65, "path": "/service/TeamService.py", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "\r\nfrom nba_api.stats.endpoints import BoxScorePlayerTrackV2\r\nfrom nba_api.stats.static import teams\r\nfrom nba_api.stats.endpoints import TeamGameLog\r\n\r\nimport json\r\nimport pandas as pd\r\nclass TeamService:\r\n #DB๊ฐ€ ์žˆ์œผ๋ฉด ์•ˆ ํ•ด๋„ ๋จ\r\n def __init__(self):\r\n pass\r\n def team_roaster(self):\r\n all_team=teams.get_teams()\r\n all_teams=[]\r\n for i in range(len(all_team)):\r\n team={}\r\n team.update(\r\n {\r\n 'team_id':all_team[i]['id'],\r\n 'team_name':all_team[i]['full_name'],\r\n 'abbrev':all_team[i]['abbreviation'],\r\n 
'nickname':all_team[i]['nickname'],\r\n 'city':all_team[i]['city']\r\n }\r\n )\r\n all_team.append(team)\r\n return [teams for teams in all_team]\r\n\r\n def get_team_id(team_abbrev):\r\n \tall_team=teams.get_teams()\r\n \tfor team in all_team:\r\n \tfor key, value in team.items():\r\n \t\tif key=='abbreviation':\r\n \t\t\tif value==team_abbrev:\r\n \t\t\t\treturn team['id']\r\n##DB์— ๊ฒŒ์ž„ ๋กœ๊ทธ๋ฅผ ์•ˆ ๋„ฃ์–ด๋‘˜ ๊ฒฝ์šฐ\r\n def get_gamelist(self,team_abbrev):\r\n team_id=TeamService.get_team_id(team_abbrev)\r\n r= TeamGameLog(season='2019-20', season_type_all_star='Regular Season',team_id=team_id)\r\n career=r.get_json()\r\n career=json.loads(career)\r\n game_log=career['resultSets'][0]['rowSet']\r\n games_list=[]\r\n for i in range(len(game_log)):\r\n game_set={}\r\n game_set={\r\n 'game_id':game_log[i][1],\r\n 'game_date':game_log[i][2],\r\n 'game_team':game_log[i][3]\r\n }\r\n games_list.append(game_set)\r\n return [game for game in games_list]\r\n\r\n def get_game_log(self,game_id):\r\n \ttracker_dict={}\r\n \tr=BoxScorePlayerTrackV2(game_id=game_id)\r\n \tplayer_tracker=r.get_json()\r\n \tplayer_tracker=json.loads(player_tracker)\r\n \ttracker_result=player_tracker[\"resultSets\"][0]\r\n \tfor j in range(0,len(tracker_result['headers'])):\r\n \t\ttracker_dict[tracker_result['headers'][j]]=[]\r\n \tfor k in range(0,len(tracker_result[\"rowSet\"])):\r\n \t\tfor j in range(0,len(tracker_result['headers'])):\r\n \t\t\ttracker_dict[tracker_result['headers'][j]].append(tracker_result[\"rowSet\"][k][j])\r\n \tlast_game_tracker=pd.DataFrame(tracker_dict)\r\n \treturn last_game_tracker" }, { "alpha_fraction": 0.7092437148094177, "alphanum_fraction": 0.7109243869781494, "avg_line_length": 22.875, "blob_id": "24198fc7d56cced5903f327cb3260975af28a1f2", "content_id": "66d2732eff3069de2d510d34d2106dae333ef279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "no_license", "max_line_length": 
84, "num_lines": 24, "path": "/app.py", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "from flask import request, jsonify, render_template, Flask, send_file, make_response\r\nfrom functools import wraps\r\nfrom nba_api.stats.endpoints import BoxScorePlayerTrackV2\r\nfrom nba_api.stats.static import teams\r\nfrom nba_api.stats.endpoints import TeamGameLog\r\n\r\n\r\nfrom service import TeamService\r\nfrom view import create_endpoints\r\n\r\nclass Services:\r\n pass\r\n\r\ndef create_app():\r\n app=Flask(__name__)\r\n services = Services\r\n services.team_service=TeamService()\r\n\r\n create_endpoints(app, services)\r\n return app\r\n\r\napp=create_app()\r\nif __name__ == '__main__':\r\n app.run()" }, { "alpha_fraction": 0.7076923251152039, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 14.75, "blob_id": "6538af440b0b4d295bffc1010bbf2f60281f9fa7", "content_id": "6ea46ce3a0caf43380fe622d193471a3af925f81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/service/__init__.py", "repo_name": "SungPyo-LEE/NBA_Flask", "src_encoding": "UTF-8", "text": "from .TeamService import TeamService\r\n\r\n__all__=[\r\n'TeamService']" } ]
6
amirfrsd/Python-Image-Processing
https://github.com/amirfrsd/Python-Image-Processing
70bd1011583e3e8529558da92af99d2cdf5909e5
a860f8d44ee495b9372046cbea6b64a2c36e7253
6c69af6da38e98e949fff9748b2f71fa93a0ddc6
refs/heads/master
2021-01-23T01:13:01.736864
2017-05-30T18:01:39
2017-05-30T18:01:39
92,860,904
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6167883276939392, "alphanum_fraction": 0.6357664465904236, "avg_line_length": 30.159090042114258, "blob_id": "f72aea7957165f1af60acacf721f9bb7646ad408", "content_id": "52817f4d563947aabb2cec50704c0922de6e1bed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1370, "license_type": "permissive", "max_line_length": 58, "num_lines": 44, "path": "/main.py", "repo_name": "amirfrsd/Python-Image-Processing", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plot\nimport matplotlib.ticker as mtick\nimport time\nimport functools as fnt\nfrom collections import Counter\nfrom PIL import Image\n\ndef recognitionFunc(filePath):\n recognized=[]\n exampleNumberColors=open('numtext.txt','r').read()\n exampleNumberColors=exampleNumberColors.split('\\n')\n i=Image.open(filePath)\n iArray=np.array(i)\n iArrayList=iArray.tolist()\n inquestion=str(iArrayList)\n for examples in exampleNumberColors:\n if(len(examples)>3):\n splitTexts=examples.split('::')\n numberIs=splitTexts[0]\n pixelArrayIs=splitTexts[1]\n eachPixelEx=pixelArrayIs.split('],')\n eachPixelInq=inquestion.split('],')\n a=0\n while(a<len(eachPixelEx)):\n if(eachPixelEx[a]==eachPixelInq[a]):\n recognized.append(int(numberIs))\n a += 1\n c=Counter(recognized)\n print(c)\n print(max(c.values()))\n xAxis=[]\n yAxis=[]\n for eachNumber in c:\n xAxis.append(eachNumber)\n yAxis.append(c[eachNumber])\n ax1=plot.subplot2grid((4,4),(0,0),rowspan=1,colspan=4)\n ax2=plot.subplot2grid((4,4),(1,0),rowspan=3,colspan=4)\n ax1.imshow(iArray)\n print(xAxis,yAxis)\n ax2.bar(xAxis,yAxis,align=\"center\")\n plot.ylim(350)\n plot.show()\nrecognitionFunc('test.png')" }, { "alpha_fraction": 0.7756410241127014, "alphanum_fraction": 0.7756410241127014, "avg_line_length": 21.285715103149414, "blob_id": "caea9f7440e116fb232c22a7f2cfdd6161a5d929", "content_id": "66d5293db00c914ca9c121ebaaba5f9da9c2de9b", "detected_licenses": [ "MIT" 
], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 156, "license_type": "permissive", "max_line_length": 65, "num_lines": 7, "path": "/README.md", "repo_name": "amirfrsd/Python-Image-Processing", "src_encoding": "UTF-8", "text": "# Python-Image-Processing\n\nSimple Python Image Processing\n\nRun it using `python main.py`\n\nDon't forget to `pip install matplotlib` and `pip install pillow`\n" } ]
2
pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN
https://github.com/pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN
66c4cc33e7b43330687d9dce1dfc0c78f84edf81
19da4a15439f1be88cf038901103aa168efba7b5
6fd3c9fe41e498f93b63124260abc559c46009ae
refs/heads/main
2023-07-05T13:41:49.700054
2021-06-30T21:07:31
2021-06-30T21:07:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7501505017280579, "alphanum_fraction": 0.768813967704773, "avg_line_length": 65.4800033569336, "blob_id": "b7a18d140a84c4a5585a096d875c0e50dff16cbc", "content_id": "df5baa847cbb001d508ebcb70127f5d901984a68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1661, "license_type": "no_license", "max_line_length": 132, "num_lines": 25, "path": "/preprocess/README.md", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "# TTS\n1. Generate .wav files from text strings\n2. text or caption should be processed into a list of strings, tokenized and all lowercase.\n3. .wav audio files need to be further processed into mel spectrogram files\n\n## Tacotron2 pre-processing\n1. clone [https://github.com/xinshengwang/Tacotron2_batch_inference](https://github.com/xinshengwang/Tacotron2_batch_inference) repo\n2. Fix the bug in the waveglow dependency as follows:\n\t1. clone the [original Tacotron2 repo](https://github.com/NVIDIA/tacotron2)\n\t2. CD into this repo: `cd tacotron2`\n\t3. Initialize and update the submodule: `git submodule init; git submodule update`\n\t4. copy the `glow.py` and `glow_old.py` from this repo into the `Tacotron2_batch_inference` directory to replace the version there\n3. Before running the inference to generate the .wav files, need to download the pre-trained Tacotron2 and Waveglow checkpoints\n4. Checkpoints can be downloaded from the [oritinal Tacotron2 repo](https://github.com/NVIDIA/tacotron2)\n5. cd back to `Tacotron2_batch_inference` directory\n6. mkdir data/output; this will be where the .wav files are saved\n7. run `python inference.py --tacotron2 <path to where tacotron2 chkpt is saved> --waveglow <path to saved waveglow chkpt>\n\n## .wav audio files to Mel\n1. run `python wavaudio_to_npy.py` to read wav to np arrays and save them as .npy files\n2. these files are saved under `audio_npy`\n3. 
run `python wavaudio_to_mel.py` to convert the .npy files in `audio_npy` into mel spectogram files and save them in \n `audio_mel` dir as .npy files with the same structure and filenames\n \n4. the .npy files in `audio_mel` have identical names as in `audio_npy`" }, { "alpha_fraction": 0.595995306968689, "alphanum_fraction": 0.6166077852249146, "avg_line_length": 29.872726440429688, "blob_id": "37dc8a0c5dcf0e3cdbc6cf2bcacec86230b9161c", "content_id": "07196a38af9ad17338cdbe6e89c27675728113fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1698, "license_type": "no_license", "max_line_length": 102, "num_lines": 55, "path": "/preprocess/wavaudio_to_mel.py", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "\"\"\"\nGo through the audio_npy direcoty and convert all audio (npy files) to mel then save as np array files\nThis script is the same as Audio_to_mel.py script in https://github.com/xinshengwang/S2IGAN\n\"\"\"\nimport numpy as np\nimport librosa\nimport os\n\n\ndef audio_processing(input_file):\n \"\"\"\n\n :param input_file: np.ndarray of each wav audio file\n :return: np.ndarray of (variable size according to audio length, 40)\n \"\"\"\n\n y = input_file\n sr = 22050\n window_size = 25\n stride = 10\n input_dim = 40\n ws = int(sr * 0.001 * window_size)\n st = int(sr * 0.001 * stride)\n feat = librosa.feature.melspectrogram(y=y, sr=sr, n_mels = input_dim, n_fft=ws, hop_length=st)\n feat = np.log(feat + 1e-6)\n\n feat = [feat]\n cmvn = True\n\n feat = np.concatenate(feat, axis=0)\n if cmvn:\n feat = (feat - feat.mean(axis=1)[:, np.newaxis]) / (feat.std(axis=1) + 1e-16)[:, np.newaxis]\n\n return np.swapaxes(feat, 0, 1).astype('float32')\n\nroot = 'audio_npy'\nsave_root = 'audio_mel'\nclss_names = os.listdir(root)\nfor clss_name in sorted(clss_names):\n clss_path = os.path.join(root,clss_name)\n img_names = os.listdir(clss_path)\n 
for img_name in sorted(img_names):\n name = img_name.split('.')[0]\n audio_path = os.path.join(clss_path,img_name)\n audios =np.load(audio_path,allow_pickle=True)\n mels = []\n for audio in audios:\n mel = audio_processing(audio)\n mels.append(mel)\n save_dir = os.path.join(save_root,clss_name)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_path = save_dir + '/' + name + '.npy'\n np.save(save_path, mels)\n print(clss_name)\n" }, { "alpha_fraction": 0.7694370150566101, "alphanum_fraction": 0.7935656905174255, "avg_line_length": 31.434782028198242, "blob_id": "b3c3c39202d49a207177785e89c1d6af96871441", "content_id": "2f649d793a9a775756e29681e73d8bc02e71172e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 746, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/preprocess/get_celebAHQ_dataset.sh", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "#mk directories for the dataset\nmkdir -p mmca/{images,audio,train,test}\nmkdir -p mmca/audio/{mel,wav}\n\n#download the filenames.pick for the train and test partitions\ncd mmca/train\ngdown https://drive.google.com/uc?id=1GdeTdBpi_IV7AuBpJAhLElqjswRmOy-7\n\ncd ../test\ngdown https://drive.google.com/uc?id=1JNxgdvPMI_HHUq2-JUuJp8L7cD-74OAf\n\n#download the text files for the image captions\ncd ../\ngdown https://drive.google.com/uc?id=1X1EFCyralNN2Bg3LhelL_lShrSrmTitW\nunzip text.zip\nrm text.zip\n\n#download the image files and only keep the .jpg image files in mmca/images\ngdown https://drive.google.com/uc?id=1badu11NqxGf6qM3PTTooQDJvQbejgbTv\nunzip CelebAMask-HQ.zip\nrm CelebAMask-HQ.zip\nmv CelebAMask-HQ/CelebA-HQ-img/*.jpg images/\nrm -r CelebAMask-HQ\n" }, { "alpha_fraction": 0.5812183022499084, "alphanum_fraction": 0.5824872851371765, "avg_line_length": 34.818180084228516, "blob_id": "e8a1c81d8f1682b6f0fa0e97fdeb912e481d0bdb", "content_id": 
"2f307ec48dbde091c3d869367faa6c4bc5d0a693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1576, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/preprocess/wavaudio_to_npy.py", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "\"\"\"\nThis script is adapted from the Audio_to_npy.py script in https://github.com/xinshengwang/S2IGAN.\nRead in all the .wav audio files in a specified directory and save the list of np.arrays as .npy files\n\"\"\"\nimport numpy as np\nimport librosa\nimport os\n\npath = 'TTS/data/output'\nclss_names = os.listdir(path)\nsave_root = 'audio_npy'\nfor clss_name in sorted(clss_names):\n print(clss_name)\n clss_path = os.path.join(path,clss_name)\n try:\n img_names = os.listdir(clss_path)\n except NotADirectoryError:\n img_names = [clss_path]\n for img_name in sorted(img_names):\n img_path = os.path.join(clss_path, img_name)\n try:\n audio_names = os.listdir(img_path)\n except NotADirectoryError or FileNotFoundError:\n audio_names = [img_name]\n audio = []\n for audio_name in sorted(audio_names):\n print(audio_name)\n audio_path = os.path.join(img_path,audio_name)\n if os.path.exists(audio_path):\n y, sr = librosa.load(audio_path)\n else:\n y, sr = librosa.load(audio_name)\n audio.append(y)\n save_path = save_root + '/'+ clss_name\n\n if save_path.endswith('.wav'):\n save_path = save_path.replace('.', '_')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n if save_path.endswith('_wav'):\n save_name = save_path + '/' + save_path.split('/')[-1] + '.npy'\n else:\n save_name = save_path + '/' + img_name + '.npy'\n np.save(save_name, audio)\n" }, { "alpha_fraction": 0.8114285469055176, "alphanum_fraction": 0.822857141494751, "avg_line_length": 42.75, "blob_id": "f09d5eb275e8c6642782e0f0a1748ea7029c1b8d", "content_id": "87752c8dff0bd3e39516591fa93082b94c180465", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 175, "license_type": "no_license", "max_line_length": 66, "num_lines": 4, "path": "/README.md", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "# HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN\n\nS2IGAN: Speech-to-Image Generation via Adversarial Learning\nhttps://xinshengwang.github.io/project/s2igan/\n" }, { "alpha_fraction": 0.6136576533317566, "alphanum_fraction": 0.6155285239219666, "avg_line_length": 23.86046600341797, "blob_id": "ab533b468b45a10378ffeb6efdf0aab46e03fc88", "content_id": "3c48559b1ee1f71992375ca48ed728560f266a10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 64, "num_lines": 43, "path": "/preprocess/captions_to_pickle.py", "repo_name": "pokarats/HLCV-Project-Facial-Image-Generation-from-Speech-Input-using-GAN", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom tqdm import tqdm\nimport pickle\n\ncwd = Path(__file__).resolve().parent\nceleba_dir = cwd / 'mmca' / 'celeba-caption'\noutput_dir = cwd / 'mmca' / 'captions_pickles'\n\ntry:\n output_dir.mkdir(parents=True, exist_ok=False)\nexcept FileExistsError:\n print(f'{output_dir} is already there')\nelse:\n print(f'{output_dir} created to store caption pickle files')\n\n\ndef process_file(infile):\n captions = []\n with open(infile, 'r') as f:\n for line in f:\n captions.append(line.lower().rstrip())\n return captions\n\n\ndef main():\n idx = 0\n for file in tqdm(celeba_dir.iterdir(), desc=\"caption file\"):\n list_of_texts = process_file(file)\n out_filename = output_dir / f\"{str(file.stem)}.pickle\"\n pickle.dump(list_of_texts, open(out_filename, 'wb'))\n idx += 1\n\n print(f'Processed {idx} files to pickle files.')\n\n \"\"\" \n for file in output_dir.iterdir():\n 
list_of_texts = pickle.load(open(file, 'rb'))\n print(list_of_texts)\n \"\"\"\n\n\nif __name__ == '__main__':\n main()\n" } ]
6
M3g4r00t/DSB2018-2
https://github.com/M3g4r00t/DSB2018-2
89fec287620a5d1816176ca11774e1d0ad53499c
bf835496a758ca8022b354d05c46192a0fe4bd7b
b60872eefbdaceadf2bcd00c6467aea6527fb8ab
refs/heads/master
2021-04-27T01:34:11.033674
2018-03-19T21:16:09
2018-03-19T21:16:09
122,677,981
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6093039512634277, "alphanum_fraction": 0.6169989705085754, "avg_line_length": 37.635135650634766, "blob_id": "8026e700b2f4460b7c58851c7248e7192262c45e", "content_id": "c54318168ada3a26d412313187213497f0a0ce33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2859, "license_type": "no_license", "max_line_length": 113, "num_lines": 74, "path": "/src/pp-image-01.py", "repo_name": "M3g4r00t/DSB2018-2", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport sys\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\nfrom scipy import misc\nfrom skimage.io import imread\nfrom skimage.transform import resize\nfrom tqdm import tqdm\n\n# Set some parameters\nIMG_CHANNELS = 3\nIMG_WIDTH = 128\nIMG_HEIGHT = 128\nROOT_PATH = 'D:\\\\Dennys\\\\Personal\\\\Cursos\\\\BecaOEA\\\\PPGCC\\\\Others\\\\Startup\\\\Kaggler\\\\DSB2018\\\\'\nTRAIN_PATH = ROOT_PATH + 'input\\\\stage1_train\\\\'\nTEST_PATH = ROOT_PATH + 'input\\\\stage1_test\\\\'\nTRAIN_PATH_PP = ROOT_PATH + 'input\\\\stage1_train_pp\\\\'\nTEST_PATH_PP = ROOT_PATH + 'input\\\\stage1_test_pp\\\\'\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\nseed = 42\nrandom.seed = seed\nnp.random.seed = seed\n\n# Set enviroment\nif not os.path.exists(TRAIN_PATH_PP):\n os.mkdir(TRAIN_PATH_PP)\n\nif not os.path.exists(TEST_PATH_PP):\n os.mkdir(TEST_PATH_PP)\n\n# Get train and test IDs\ntrain_ids = next(os.walk(TRAIN_PATH))[1]\ntest_ids = next(os.walk(TEST_PATH))[1]\n\nprint('Getting and resizing train images and masks ... 
' + str(len(train_ids)))\nsys.stdout.flush()\n\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n path = TRAIN_PATH + id_\n file = path + '/images/' + id_ + '.png'\n img = imread(file)[:, :, :IMG_CHANNELS]\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n\n if not os.path.exists(TRAIN_PATH_PP + id_):\n os.mkdir(TRAIN_PATH_PP + id_)\n os.mkdir(TRAIN_PATH_PP + id_ + '/images/')\n os.mkdir(TRAIN_PATH_PP + id_ + '/masks/')\n # imsave(TRAIN_PATH_PP + id_ + '/images/' + id_ + '.png', img)\n misc.imsave(TRAIN_PATH_PP + id_ + '/images/' + id_ + '.png', img)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n for mask_file in next(os.walk(path + '/masks/'))[2]:\n mask_ = imread(path + '/masks/' + mask_file)\n mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',\n preserve_range=True), axis=-1)\n mask = np.maximum(mask, mask_)\n plt.imsave(TRAIN_PATH_PP + id_ + '/masks/' + id_ + '.png', np.array(mask).reshape(IMG_HEIGHT, IMG_WIDTH),\n cmap=cm.gray)\n\nprint('Getting and resizing test images ... 
' + str(len(test_ids)))\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\n path = TEST_PATH + id_\n file = path + '/images/' + id_ + '.png'\n img = imread(file)[:, :, :IMG_CHANNELS]\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n if not os.path.exists(TEST_PATH_PP + id_):\n os.mkdir(TEST_PATH_PP + id_)\n os.mkdir(TEST_PATH_PP + id_ + '/images/')\n # imsave(TEST_PATH_PP + id_ + '/images/' + id_ + '.png', img)\n misc.imsave(TEST_PATH_PP + id_ + '/images/' + id_ + '.png', img)\n" }, { "alpha_fraction": 0.6067672371864319, "alphanum_fraction": 0.6227709054946899, "avg_line_length": 30.84951400756836, "blob_id": "fc3c6b4fc30045c89bc5e67d56997fa327beac3a", "content_id": "3ea445ebd89609b76e195b2fb71f7e0917e340ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6561, "license_type": "no_license", "max_line_length": 121, "num_lines": 206, "path": "/src/ml-u-net-02-analysis.py", "repo_name": "M3g4r00t/DSB2018-2", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport sys\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom skimage.io import imread, imshow\nfrom skimage.morphology import label\nfrom tqdm import tqdm\n\n# Set some parameters\nIMG_WIDTH = 256\nIMG_HEIGHT = 256\nIMG_CHANNELS = 3\nBATCH_SIZE = 10 # the higher the better\nROOT_PATH = 'D:\\\\Dennys\\\\Personal\\\\Cursos\\\\BecaOEA\\\\PPGCC\\\\Others\\\\Startup\\\\Kaggler\\\\DSB2018\\\\'\nTRAIN_PATH = ROOT_PATH + 'input\\\\stage1_train_pp_02\\\\'\nTEST_PATH = ROOT_PATH + 'input\\\\stage1_test\\\\'\nMODEL = 'model-dsbowl2018-4'\n\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\ninput_seed = 42\nrandom.seed = input_seed\nnp.random.seed(input_seed)\n\n# Get train and test IDs\ntrain_ids = 
next(os.walk(TRAIN_PATH))[1]\ntest_ids = next(os.walk(TEST_PATH))[1]\n\n# Get and resize train images and masks\nX_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nY_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\nprint('Getting train images and masks ... ')\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n path = TRAIN_PATH + id_\n img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]\n X_train[n] = img\n mask = imread(path + '/masks/' + id_ + '.png')[:, :, :1]\n Y_train[n] = mask\n\n# Get and resize test images\n'''\nX_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nsizes_test = []\nprint('Getting and resizing test images ... ' + str(len(test_ids)))\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\n path = TEST_PATH + id_\n img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]\n sizes_test.append([img.shape[0], img.shape[1]])\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n X_test[n] = img\n'''\nprint('Done!')\n\n\n# Define IoU metric\ndef mean_iou(y_true, y_pred):\n prec = []\n for t in np.arange(0.5, 1.0, 0.05):\n y_pred_ = tf.to_int32(y_pred > t)\n score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([up_opt]):\n score = tf.identity(score)\n prec.append(score)\n return K.mean(K.stack(prec), axis=0)\n\n\n# Predict on train, val and test\nmodel = load_model(ROOT_PATH + 'output\\\\' + MODEL + '.h5', custom_objects={'mean_iou': mean_iou})\npreds_train = model.predict(X_train, verbose=1)\n'''\npreds_test = model.predict(X_test, verbose=1)\n'''\n\n# Threshold predictions\npreds_train_t = (preds_train > 0.5).astype(np.uint8)\n'''\npreds_test_t = (preds_test > 0.5).astype(np.uint8)\n'''\n# Perform a sanity check on some random training 
samples\nix = random.randint(0, len(preds_train_t))\nimshow(X_train[ix])\nplt.show()\nimshow(np.squeeze(Y_train[ix]))\nplt.show()\nimshow(np.squeeze(preds_train_t[ix]))\nplt.show()\n\n\n# Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python\ndef rle_encoding(x):\n dots = np.where(x.T.flatten() == 1)[0]\n run_lengths = []\n prev = -2\n for b in dots:\n if b > prev + 1:\n run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths\n\n\ndef prob_to_rles(x, cutoff=0.5):\n lab_img = label(x > cutoff)\n for ii in range(1, lab_img.max() + 1):\n yield rle_encoding(lab_img == ii)\n\n\ndef iou_metric(y_true_in, y_pred_in, print_table=False):\n labels = label(y_true_in > 0.5)\n y_pred = label(y_pred_in > 0.5)\n\n true_objects = len(np.unique(labels))\n pred_objects = len(np.unique(y_pred))\n\n intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]\n\n # Compute areas (needed for finding the union between all objects)\n area_true = np.histogram(labels, bins=true_objects)[0]\n area_pred = np.histogram(y_pred, bins=pred_objects)[0]\n area_true = np.expand_dims(area_true, -1)\n area_pred = np.expand_dims(area_pred, 0)\n\n # Compute union\n union = area_true + area_pred - intersection\n\n # Exclude background from the analysis\n intersection = intersection[1:, 1:]\n union = union[1:, 1:]\n union[union == 0] = 1e-9\n\n # Compute the intersection over union\n iou = intersection / union\n\n # Precision helper function\n def precision_at(threshold, iou):\n matches = iou > threshold\n true_positives = np.sum(matches, axis=1) == 1 # Correct objects\n true_negatives = np.sum(matches, axis=0) == 1 # Correct objects\n false_positives = np.sum(matches, axis=0) == 0 # Missed objects\n false_negatives = np.sum(matches, axis=1) == 0 # Extra objects\n tp, tn, fp, fn = np.sum(true_positives), np.sum(true_negatives), np.sum(false_positives), np.sum(false_negatives)\n return tp, tn, 
fp, fn\n\n # Loop over IoU thresholds\n prec = []\n tp_array = []\n tn_array = []\n fp_array = []\n fn_array = []\n if print_table:\n print(\"Thresh\\tTP\\tFP\\tFN\\tPrec.\")\n for t in np.arange(0.5, 1.0, 0.05):\n tp, tn, fp, fn = precision_at(t, iou)\n if (tp + fp + fn) > 0:\n p = tp / (tp + fp + fn)\n else:\n p = 0\n if print_table:\n print(\"{:1.3f}\\t{}\\t{}\\t{}\\t{:1.3f}\".format(t, tp, fp, fn, p))\n prec.append(p)\n tp_array.append(tp)\n tn_array.append(tn)\n fp_array.append(fp)\n fn_array.append(fn)\n\n if print_table:\n print(\"AP\\t-\\t-\\t-\\t{:1.3f}\".format(np.mean(prec)))\n return np.mean(prec), np.mean(tp_array), np.mean(tn_array), np.mean(fp_array), np.mean(fn_array)\n\n\ndef mean_iou_array(y_true_array, y_pred_array):\n prec = []\n tp_array = []\n tn_array = []\n fp_array = []\n fn_array = []\n print('Getting train scores ... ')\n sys.stdout.flush()\n for n, _ in tqdm(enumerate(y_true_array), total=len(y_true_array)):\n score, tp, tn, fp, fn = iou_metric(y_true_array[n], y_pred_array[n])\n prec.append(score)\n tp_array.append(tp)\n tn_array.append(tn)\n fp_array.append(fp)\n fn_array.append(fn)\n return prec, tp_array, tn_array, fp_array, fn_array\n\n\nsub = pd.DataFrame()\nsub['ImageId'] = train_ids\nscore, tp, tn, fp, fn = mean_iou_array(Y_train, preds_train_t)\nsub['IoU'] = score\nsub['tp'] = tp\nsub['tn'] = tn\nsub['fp'] = fp\nsub['fn'] = fn\nsub.to_csv(ROOT_PATH + 'output\\\\' + MODEL + '-analysis.csv', index=False)\n" }, { "alpha_fraction": 0.5694515109062195, "alphanum_fraction": 0.578569769859314, "avg_line_length": 38.13949203491211, "blob_id": "eb33b7e6fe34b52a18782d89dfa45839eb774776", "content_id": "f2865dbd971a11a692e00466b5f925746a86e8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21605, "license_type": "no_license", "max_line_length": 108, "num_lines": 552, "path": "/src/aug-image-02.py", "repo_name": "M3g4r00t/DSB2018-2", "src_encoding": "UTF-8", "text": "import 
os\nimport random\nimport sys\nimport warnings\n\nimport numpy as np\nfrom PIL import Image\nfrom imgaug import augmenters as iaa, misc\nfrom skimage.io import imread\nfrom tqdm import tqdm\n\n# Image dimensions (resize)\nIMG_CHANNELS = 3\nIMG_WIDTH = 256\nIMG_HEIGHT = 256\n\n# Batch size for data augmentation\nBATCH_SIZE = 32\n\n# Filename setup\nROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '\\\\'\nTRAIN_PATH = ROOT_PATH + 'input\\\\stage1_train\\\\'\nTEST_PATH = ROOT_PATH + 'input\\\\stage1_test\\\\'\nTRAIN_PATH_PP = ROOT_PATH + 'input\\\\stage1_train_pp_02\\\\'\nTEST_PATH_PP = ROOT_PATH + 'input\\\\stage1_test_pp_02\\\\'\nTRAIN_PATH_AUG = ROOT_PATH + 'input\\\\stage1_train_aug_02\\\\'\nNOISE_PATH = ROOT_PATH + 'input\\\\'\n\n# Skimage warnings\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\n\n# For experiment replication\ninput_seed = 42 # the universe response\nrandom.seed = input_seed\nnp.random.seed(input_seed)\n\n# Set enviroment\nif not os.path.exists(TRAIN_PATH_AUG):\n os.mkdir(TRAIN_PATH_AUG)\n\n# Get train and test IDs\ntrain_ids = next(os.walk(TRAIN_PATH_PP))[1]\n\nprint('Getting and transforming train images and masks ... 
' + str(len(train_ids)))\nsys.stdout.flush()\n\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n path = TRAIN_PATH_PP + id_\n file = path + '/images/' + id_ + '.png'\n img = imread(file)[:, :, :IMG_CHANNELS]\n\n # Transform #0\n\n id_aug = id_ + '-0'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True),\n iaa.Fliplr(1, deterministic=True)], deterministic=True) # none\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #1\n\n id_aug = id_ + '-1'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True)], deterministic=True) # horizontal flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #2\n\n id_aug = id_ + '-2'\n seq = iaa.Sequential([iaa.Flipud(1, deterministic=True)], deterministic=True) # vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = 
seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #3\n\n id_aug = id_ + '-3'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True), iaa.Flipud(1, deterministic=True)],\n deterministic=True) # horizontal + vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #4\n\n id_aug = id_ + '-4'\n seq = iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\")]) # change color\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #5\n\n id_aug = id_ + '-5'\n seq = 
iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Fliplr(1, deterministic=True)]) # change color + horizontal flip\n\n seq_mask = iaa.Sequential([iaa.Fliplr(1, deterministic=True)], deterministic=True) # horizontal flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #6\n\n id_aug = id_ + '-6'\n seq = iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Flipud(1, deterministic=True)]) # change color + vertical flip\n\n seq_mask = iaa.Sequential([iaa.Flipud(1, deterministic=True)], deterministic=True) # vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #7\n\n id_aug = id_ + '-7'\n seq = 
iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Fliplr(1, deterministic=True),\n iaa.Flipud(1, deterministic=True)]) # change color + horizontal + vertical flip\n\n seq_mask = iaa.Sequential([iaa.Fliplr(1, deterministic=True), iaa.Flipud(1, deterministic=True)],\n deterministic=True) # horizontal + vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #8\n\n id_aug = id_ + '-8'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-01.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #9\n\n id_aug = 
id_ + '-9'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-01.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(90)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #10\n\n id_aug = id_ + '-10'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-01.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(180)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #11\n\n id_aug = id_ + '-11'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n 
os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-01.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(270)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #12\n\n id_aug = id_ + '-12'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-02.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #13\n\n id_aug = id_ + '-13'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + 
'/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-02.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(90)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #14\n\n id_aug = id_ + '-14'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-02.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(180)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #15\n\n id_aug = id_ + '-15'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH 
+ \"noise-02.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(270)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #16\n\n id_aug = id_ + '-16'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-03.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #16\n\n id_aug = id_ + '-16'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-03.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground 
= foreground.rotate(90)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #17\n\n id_aug = id_ + '-17'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-03.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(180)\n\n img_aug = Image.new(\"RGBA\", background.size)\n img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #18\n\n id_aug = id_ + '-18'\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n\n background = Image.open(file)\n foreground = Image.open(NOISE_PATH + \"noise-03.png\")\n\n background = background.convert('RGBA')\n foreground = foreground.convert('RGBA')\n foreground = foreground.rotate(270)\n\n img_aug = Image.new(\"RGBA\", background.size)\n 
img_aug = Image.alpha_composite(img_aug, background)\n img_aug = Image.alpha_composite(img_aug, foreground)\n\n img_aug = img_aug.convert(\"RGB\")\n img_aug.save(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png')\n\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n" }, { "alpha_fraction": 0.5719146728515625, "alphanum_fraction": 0.5820099711418152, "avg_line_length": 43.979591369628906, "blob_id": "16fdf440bfce2ba8798844fd58f8f5b15cc495fd", "content_id": "75ab6cc5b81ad83cc8021cf9be867209d677cd73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8816, "license_type": "no_license", "max_line_length": 108, "num_lines": 196, "path": "/src/aug-image-01.py", "repo_name": "M3g4r00t/DSB2018-2", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport sys\nimport warnings\n\nimport numpy as np\nfrom imgaug import augmenters as iaa, misc\nfrom skimage.io import imread\nfrom tqdm import tqdm\n\n# Set some parameters\nIMG_CHANNELS = 3\nIMG_WIDTH = 128\nIMG_HEIGHT = 128\nROOT_PATH = 'D:\\\\Dennys\\\\Personal\\\\Cursos\\\\BecaOEA\\\\PPGCC\\\\Others\\\\Startup\\\\Kaggler\\\\DSB2018\\\\'\nTRAIN_PATH = ROOT_PATH + 'input\\\\stage1_train\\\\'\nTEST_PATH = ROOT_PATH + 'input\\\\stage1_test\\\\'\nTRAIN_PATH_PP = ROOT_PATH + 'input\\\\stage1_train_pp\\\\'\nTEST_PATH_PP = ROOT_PATH + 'input\\\\stage1_test_pp\\\\'\nTRAIN_PATH_AUG = ROOT_PATH + 'input\\\\stage1_train_aug\\\\'\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\nseed = 42\nrandom.seed = seed\nnp.random.seed = seed\n\n# Set enviroment\nif not os.path.exists(TRAIN_PATH_AUG):\n os.mkdir(TRAIN_PATH_AUG)\n\n# Get train and test IDs\ntrain_ids = next(os.walk(TRAIN_PATH_PP))[1]\n\nprint('Getting and transforming train images and masks ... 
' + str(len(train_ids)))\nsys.stdout.flush()\n\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n path = TRAIN_PATH_PP + id_\n file = path + '/images/' + id_ + '.png'\n img = imread(file)[:, :, :IMG_CHANNELS]\n\n # Transform #0\n\n id_aug = id_ + '-0'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True),\n iaa.Fliplr(1, deterministic=True)], deterministic=True) # none\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #1\n\n id_aug = id_ + '-1'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True)], deterministic=True) # horizontal flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #2\n\n id_aug = id_ + '-2'\n seq = iaa.Sequential([iaa.Flipud(1, deterministic=True)], deterministic=True) # vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = 
seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #3\n\n id_aug = id_ + '-3'\n seq = iaa.Sequential([iaa.Fliplr(1, deterministic=True), iaa.Flipud(1, deterministic=True)],\n deterministic=True) # horizontal + vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #4\n\n id_aug = id_ + '-4'\n seq = iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\")]) # change color\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = mask\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #5\n\n id_aug = id_ + '-5'\n seq = 
iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Fliplr(1, deterministic=True)]) # change color + horizontal flip\n\n seq_mask = iaa.Sequential([iaa.Fliplr(1, deterministic=True)], deterministic=True) # horizontal flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #6\n\n id_aug = id_ + '-6'\n seq = iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Flipud(1, deterministic=True)]) # change color + vertical flip\n\n seq_mask = iaa.Sequential([iaa.Flipud(1, deterministic=True)], deterministic=True) # vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n\n # Transform #7\n\n id_aug = id_ + '-7'\n seq = 
iaa.Sequential([iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels([0, 1, 2], iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n iaa.Fliplr(1, deterministic=True),\n iaa.Flipud(1, deterministic=True)]) # change color + horizontal + vertical flip\n\n seq_mask = iaa.Sequential([iaa.Fliplr(1, deterministic=True), iaa.Flipud(1, deterministic=True)],\n deterministic=True) # horizontal + vertical flips\n\n if not os.path.exists(TRAIN_PATH_AUG + id_aug):\n os.mkdir(TRAIN_PATH_AUG + id_aug)\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/images/')\n os.mkdir(TRAIN_PATH_AUG + id_aug + '/masks/')\n img_aug = seq.augment_image(img)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/images/' + id_aug + '.png', img_aug)\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n mask_ = imread(path + '/masks/' + id_ + '.png')\n mask = np.maximum(mask, mask_)\n mask_aug = seq_mask.augment_image(mask)\n misc.imsave(TRAIN_PATH_AUG + id_aug + '/masks/' + id_aug + '.png', mask_aug)\n" } ]
4
Dheeraj1998/web-crawler
https://github.com/Dheeraj1998/web-crawler
ab5fd934ef8b6b940933b89fc2b1fd245aa66e54
eabfeff7a0b5d0d2d2885b26536a235c4f260408
11253d5a9a38b70cf16c5b8a8f6d409cb334ed5c
refs/heads/master
2019-01-06T01:55:56.075929
2017-03-01T07:41:19
2017-03-01T07:41:19
83,525,742
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7900000214576721, "avg_line_length": 19, "blob_id": "90690af183bff2a2dc4bda090dfc58b4a09a30b1", "content_id": "48ee63a8af8266eac9813d3a5c8b0e6137a4eb9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/README.md", "repo_name": "Dheeraj1998/web-crawler", "src_encoding": "UTF-8", "text": "# web-crawler\nA basic web-crawler to display info about website\n\n# Dependencies\nbs4 (BeautifulSoup)\n" }, { "alpha_fraction": 0.545073390007019, "alphanum_fraction": 0.5492662191390991, "avg_line_length": 29.774192810058594, "blob_id": "98e5348710eed0232359a3f10e21b47a8789c013", "content_id": "9dbbc9cbe4cb433cb87433f1d226ea34561ab421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 87, "num_lines": 31, "path": "/basic.py", "repo_name": "Dheeraj1998/web-crawler", "src_encoding": "UTF-8", "text": "import urllib.request as ur\nfrom bs4 import BeautifulSoup as bs\n\n#The main URL should be of the type: www.google.com\nmain_url = str(input('Enter the URL of the page: '))\nif(main_url[4] != 'http'):\n\tmain_url = 'http://' + main_url\nhtml = ur.urlopen(main_url).read()\n\nsoup = bs(html,\"html.parser\")\n\nprint('\\n*******************************\\n')\nprint('The HTML of the entire page is: \\n\\n' + soup.prettify())\nprint('\\n*******************************')\n\n#Find all the lines in HTML code which are part of the 'a' class\nlinks = soup.find_all('a')\naval_links = []\n\nprint('\\n*******************************\\n')\nprint('The links on the page are: \\n\\n')\n\ncount = 0\nfor x in links:\n\t#Used to get the text and the links in all the 'a' tags\n\tif(x.get('href') and x.get_text()):\n\t\taval_links.append(x.get('href').strip())\n\t\tprint(str(count) + ') ' + 
x.get_text().strip() + ' (' + x.get('href').strip() + ') ')\n\t\tcount+=1\n\t\t\nprint('\\n*******************************')\n" } ]
2
Slajni/Plemiona-Bot
https://github.com/Slajni/Plemiona-Bot
17ce702e87cca09feb96bb1949d4cb2df1dfe24f
ade270ca27684ae0327e194427a078f788170aa5
08e549b98826a356efa5029826371079e032ed29
refs/heads/master
2020-12-09T05:20:02.628540
2020-06-13T07:51:34
2020-06-13T07:51:34
233,204,651
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6275122761726379, "alphanum_fraction": 0.6520768404006958, "avg_line_length": 28.467105865478516, "blob_id": "eaf4f543f38438f33b8f39422a2f5f46b1b6f478", "content_id": "f1f8c90fcca0713abd72ca1f88d1268dd51d63b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4478, "license_type": "no_license", "max_line_length": 127, "num_lines": 152, "path": "/main.py", "repo_name": "Slajni/Plemiona-Bot", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom time import sleep\nimport math\n\nname = '' # your login\npassword = '' #your password\nvillage_cords = '537|748' # coordinates of your village\n\nradius = 15 # radius of attack range\npoints = 120 # max number of points for a villages to attack\nserver_name = 'pl148'\n\nignored_villages = ['532|746'] # coords of villages you don't want to attack but are in range\n\ndriver = webdriver.Chrome()\n\n\nnumber_spear = \"2\"\nnumber_sword = \"2\"\nnumber_knight = \"1\"\n\n\ndef get_coords(text):\n lista = text.split('|')\n coordy = lista[0][-3:] + '|' + lista[1][:3]\n return coordy\n\ndef get_distance(coords1, coords2 = village_cords):\n\n x1 = float(coords1[:3])\n x2 = float(coords2[:3])\n y1 = float(coords1[4:])\n y2 = float(coords2[4:])\n\n return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\n\ndef compare_distance(coords1, coords2 = village_cords):\n\n if get_distance(coords1) > get_distance(coords2):\n return True\n else:\n return False\n\ndef template_a():\n\n driver.find_element_by_xpath('//*[@id=\"unit_input_spear\"]').send_keys(number_spear)\n sleep(1)\n driver.find_element_by_xpath('//*[@id=\"unit_input_sword\"]').send_keys(number_sword)\n sleep(1.5)\n\ndef template_b():\n\n driver.find_element_by_xpath('//*[@id=\"unit_input_knight\"]').send_keys(number_knight)\n sleep(1)\n\n\n\ntemplates = [template_a, template_b]\n\n\ndriver.get('https://plemiona.vopo.pl/village_list/')\nfrom selenium.webdriver.support.ui import Select\nselect 
= Select(driver.find_element_by_id('id_world'))\n# select by visible text\n#select.select_by_visible_text('Banana')\n# select by value \nselect.select_by_value(server_name)\n\ndriver.find_element_by_xpath('//*[@id=\"id_coords\"]').send_keys(village_cords)\ndriver.find_element_by_xpath('//*[@id=\"id_size\"]').send_keys(radius)\ndriver.find_element_by_xpath('/html/body/div/div/div/form/button').click()\nsleep(2)\ndriver.find_element_by_xpath('//*[@id=\"page-wrapper\"]/div/div[2]/div[7]/div/label[3]').click()\nsleep(2)\ndriver.find_element_by_xpath('//*[@id=\"page-wrapper\"]/div/div[2]/div[5]/div/div[2]/div/input').send_keys(\"\\b\\b\\b\\b\\b\\b\"+ str(points))\nsleep(8)\ndriver.find_element_by_xpath('//*[@id=\"page-wrapper\"]/div/div[2]/div[8]/button').click()\nsleep(1)\nall_villages = driver.find_element_by_xpath('//*[@id=\"export_space_coords\"]').get_attribute('value')\nsleep(2)\n\ndriver.get(\"https://www.plemiona.pl/\")\ndriver.find_element_by_xpath('//*[@id=\"user\"]').send_keys(name)\ndriver.find_element_by_xpath('//*[@id=\"password\"]').send_keys(password)\ndriver.find_element_by_xpath('/html/body/div[3]/div[4]/div[10]/div[3]/div[2]/form/div/div/a').click()\n\n\nsleep(2)\ndriver.get('https://www.plemiona.pl/page/play/{}'.format(server_name))\nsleep(2)\ndriver.get('https://pl148.plemiona.pl/game.php?village=66423&screen=place')\n\nvillages = all_villages.split()\nattack_string_to_format = '//*[@id=\"commands_outgoings\"]/table/tbody/tr[{}]/td[1]/span/span/a/span[2]'#.format(str(i+2))\n\n# getting all attacks:\nattacked_villages = []\nfor i in range(len(villages)):\n try:\n tekst = driver.find_element_by_xpath(attack_string_to_format.format(i + 2)).text\n if 'Powr' in tekst:\n continue\n attacked_villages.append(get_coords(tekst))\n except:\n pass\nprint('Already attacked villages:')\nprint(attacked_villages)\nsleep(1)\nvillages.sort(key = get_distance)\n\ni = 0\n\n\n\nfor index, village in enumerate(villages):\n\n if village in attacked_villages or village in 
ignored_villages:\n continue\n\n\n coords = '//*[@id=\"place_target\"]/input'\n driver.find_element_by_xpath(coords).send_keys(village)\n sleep(1)\n\n templates[i]()\n\n attack = '//*[@id=\"target_attack\"]'\n driver.find_element_by_xpath(attack).click()\n try:\n attack_confirm = '//*[@id=\"troop_confirm_go\"]'\n driver.find_element_by_xpath(attack_confirm).click()\n print(\"Attacking village: {}\".format(village))\n except:\n print('Not enough units to run tamplate {}'.format(str(templates[i])))\n i += 1\n if i < len(templates):\n driver.get('https://pl148.plemiona.pl/game.php?village=66423&screen=place')\n coords = '//*[@id=\"place_target\"]/input'\n driver.find_element_by_xpath(coords).send_keys(village)\n sleep(1)\n templates[i]()\n driver.find_element_by_xpath(attack).click()\n driver.get('https://pl148.plemiona.pl/game.php?village=66423&screen=place')\n else:\n break\n\n\n\n\nsleep(2)\ndriver.quit()" }, { "alpha_fraction": 0.6791045069694519, "alphanum_fraction": 0.7089552283287048, "avg_line_length": 14, "blob_id": "c0a5f1f1e9ef3b7626e3b9cf39947ca0a1d03352", "content_id": "65d9a4b77873900198e2d2db6fb891786e2423d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/runner.py", "repo_name": "Slajni/Plemiona-Bot", "src_encoding": "UTF-8", "text": "import os\nfrom time import sleep\n\nminutes_interval = 30\n\nwhile True:\n\n os.system('python main.py')\n sleep(minutes_interval * 60)" }, { "alpha_fraction": 0.7976653575897217, "alphanum_fraction": 0.7976653575897217, "avg_line_length": 50.400001525878906, "blob_id": "77ff2a9ebfa251b8387b5549aa2b7273b9d8300e", "content_id": "19d09d5d4d2f9fcfe265f72526a15c5081af5134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 257, "license_type": "no_license", "max_line_length": 152, "num_lines": 5, "path": "/README.md", 
"repo_name": "Slajni/Plemiona-Bot", "src_encoding": "UTF-8", "text": "# Plemiona-Bot\n\nThis bot performs most annoying task in the game and can be run whenever you don't want to play and for example sleep to keep attacking nearby villages.\n\nProject is based on Selenium and was made mainly for fun and learning Selenium itself.\n" } ]
3
Raj-Kumar2208/linkedList-Stacks-Queues-Hashing
https://github.com/Raj-Kumar2208/linkedList-Stacks-Queues-Hashing
9db22accdb7a2b802f40a01f56b568bf28625e25
7cd9103099232a14b6ac743ff2237784fa13e2c7
1e7eb01397d0266c7c86ba7fc78bdce3fddfc84c
refs/heads/master
2020-08-12T01:08:08.829950
2019-10-24T15:51:45
2019-10-24T15:51:45
214,661,578
0
0
null
2019-10-12T14:32:09
2019-10-14T18:10:44
2019-10-16T19:20:48
Python
[ { "alpha_fraction": 0.4870245158672333, "alphanum_fraction": 0.4959118366241455, "avg_line_length": 27.746030807495117, "blob_id": "ad0fae04deb9a801dca72195afacdfdc36f7ce6f", "content_id": "30d813f3b2e0b0977040788218a2a5ac3ed4d0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5626, "license_type": "no_license", "max_line_length": 69, "num_lines": 189, "path": "/DoubleLinkedList.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 12 20:06:42 2019\r\n\r\n@author: [email protected]\r\n\r\nWe perform all the required operation on double linked list \r\n1) Insertion\r\n2) Deletion\r\n3) Traversing\r\n4) Print Reverse\r\n5) Reverse\r\n\"\"\"\r\nclass Node:\r\n def __init__(self, data=None, next_p=None, prev=None):\r\n self.data = data\r\n self.next_p = next_p\r\n self.prev = prev\r\n \r\n def set_data(self,data):\r\n self.data = data\r\n def get_data(self):\r\n return self.data\r\n \r\n def set_next(self,next_p):\r\n self.next_p = next_p\r\n def get_next(self):\r\n return self.next_p\r\n \r\n def set_prev(self,prev):\r\n self.prev = prev\r\n def get_prev(self):\r\n return self.prev\r\n \r\n def has_next(self):\r\n return self.next_p != None\r\n def has_prev(self):\r\n return self.prev != None\r\n\r\ndef traversing_count(linkedlist):\r\n count = linkedlist.length\r\n current = linkedlist.head\r\n while current is not None:\r\n print(\"Data is: \"+str(current.get_data()))\r\n current = current.get_next()\r\n print(\"count is: \"+str(count))\r\n\r\nclass DoubleLL:\r\n \r\n def __init__(self,head=None,tail=None):\r\n self.head = head\r\n self.tail = tail\r\n self.length = 0\r\n \r\n \r\n \r\n def insert_at_start(self,data):\r\n new_node = Node(data)\r\n if ((self.length == 0) & (self.head == None)):\r\n self.head = new_node\r\n self.tail = new_node\r\n else:\r\n new_node.set_next(self.head)\r\n 
new_node.set_prev(None)\r\n self.head = new_node\r\n self.length +=1\r\n \r\n def insert_at_end(self,data):\r\n new_node = Node(data)\r\n if ((self.length == 0) & (self.head == None)):\r\n self.head = new_node\r\n self.tail = new_node\r\n else:\r\n current = self.head\r\n while(current.get_next() != None):\r\n current = current.get_next()\r\n current.set_next(new_node)\r\n new_node.set_next(None)\r\n new_node.set_prev(current)\r\n self.tail = new_node\r\n self.length +=1\r\n \r\n def insert_at_pos(self,data,pos):\r\n if ((pos <= 0) | (pos > self.length)):\r\n raise ValueError(\"Entered position value is incorrect\")\r\n elif pos ==1:\r\n self.insert_at_start(data)\r\n elif pos == self.length:\r\n self.insert_at_end(data)\r\n else:\r\n new_node = Node(data)\r\n previous = self.head\r\n current = self.head\r\n count = 1\r\n while (count < pos):\r\n count += 1\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(new_node)\r\n current.set_prev(new_node)\r\n new_node.set_next(current)\r\n new_node.set_prev(previous)\r\n self.length +=1\r\n \r\n def delete_at_start(self):\r\n if ((self.length == 0) & (self.head == None)):\r\n raise ValueError(\"List is Empty\")\r\n else:\r\n previous = self.head\r\n current = self.head\r\n current = current.get_next()\r\n self.head = current\r\n previous.set_next(None)\r\n current.set_prev(None)\r\n self.length -= 1\r\n \r\n def delete_at_end(self):\r\n if((self.length == 0) & (self.head == None)):\r\n raise ValueError(\"List is Empty\")\r\n else:\r\n previous = self.head\r\n current = self.head\r\n while(current.get_next() != None):\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(None)\r\n current.set_prev(None)\r\n self.length -= 1\r\n \r\n def delete_at_pos(self,pos):\r\n if ((self.length == 0) | (self.head == None)):\r\n raise ValueError(\"List is Empty\")\r\n elif ((pos < 0) | (pos > self.length)):\r\n raise ValueError(\"Check the entered position: \"+str(pos))\r\n elif pos == 
1:\r\n self.delete_at_start()\r\n elif pos == self.length:\r\n self.delete_at_end()\r\n else:\r\n previous = self.head\r\n current = self.head\r\n count = 1\r\n while (count < pos):\r\n count += 1\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(current.get_next())\r\n current.set_prev(None)\r\n temp = current\r\n current = current.get_next()\r\n current.set_prev(previous)\r\n temp.set_next(None)\r\n self.length -=1\r\n\r\n\r\n def print_reversing(self):\r\n current = self.tail\r\n count = self.length\r\n while (count !=0):\r\n print(\"Reverse Data is: \"+str(current.get_data()))\r\n current = current.get_prev()\r\n count -=1\r\n\r\n def reverse(self):\r\n temp = None\r\n current = self.head\r\n while current is not None:\r\n temp = current.get_prev()\r\n current.set_prev(current.get_next())\r\n current.set_next(temp)\r\n current = current.get_prev()\r\n if temp is not None:\r\n self.head = temp.get_prev()\r\n \r\n \r\n\r\n\r\n \r\nif __name__==\"__main__\":\r\n mylist = DoubleLL()\r\n mylist.insert_at_start(5)\r\n mylist.insert_at_end(6)\r\n mylist.insert_at_end(7)\r\n mylist.insert_at_pos(1,1)\r\n mylist.insert_at_pos(8,4)\r\n mylist.insert_at_pos(10,2)\r\n traversing_count(mylist)\r\n mylist.reverse()\r\n traversing_count(mylist)\r\n " }, { "alpha_fraction": 0.5126705765724182, "alphanum_fraction": 0.5256659984588623, "avg_line_length": 22.046875, "blob_id": "da409fa94da8c1b8579694214cf1251a62ac3a55", "content_id": "ef0415f375d645f932b5dabf33ed5894069c042f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 57, "num_lines": 64, "path": "/dynArrayStack.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 21 20:40:00 2019\r\n\r\n@author: [email protected]\r\n\r\nSTack implementation based on Dynamic Array\r\n\r\nHere we are 
going to array doubling technique\r\nWhen array get full we increase its size by doubling it\r\n\"\"\"\r\n\r\nclass StackDynaArray:\r\n def __init__(self,limit=2):\r\n \"\"\" please change the limit value for first array\r\n initilization\"\"\"\r\n self.limit = limit\r\n self.arr =[]\r\n \r\n def is_empty(self):\r\n if len(self.arr) == 0:\r\n return True\r\n \r\n def push(self,data):\r\n if len(self.arr) >= self.limit:\r\n self.resize()\r\n self.arr.append(data)\r\n print(\"Stack after push: \",self.arr)\r\n \r\n def pop(self):\r\n if self.is_empty():\r\n print(\"Stack Underflow\")\r\n else:\r\n return self.arr.pop()\r\n \r\n def peek(self):\r\n if self.is_empty():\r\n print(\"Stack Underflow\")\r\n else:\r\n return self.arr[-1]\r\n \r\n def print_stack(self):\r\n print(\"stack is: \",self.arr)\r\n \r\n def resize(self):\r\n temp = self.arr\r\n self.limit = 2*self.limit\r\n self.arr = temp\r\n \r\n def stack_size(self):\r\n print(\"length of stack is: \", len(self.arr))\r\n\r\nif __name__ == \"__main__\":\r\n stack = StackDynaArray()\r\n stack.pop()\r\n stack.print_stack()\r\n stack.push(1)\r\n stack.push(2)\r\n stack.push(3)\r\n stack.stack_size()\r\n stack.pop()\r\n stack.stack_size()\r\n print(stack.peek())\r\n stack.print_stack()\r\n" }, { "alpha_fraction": 0.49864938855171204, "alphanum_fraction": 0.5089141130447388, "avg_line_length": 22.302631378173828, "blob_id": "7f3d40cfb1caf93eb6aa62798c8a2ac0f1b3c994", "content_id": "165e55b5895e1583fcd87b64a825305e5b8f64c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1851, "license_type": "no_license", "max_line_length": 56, "num_lines": 76, "path": "/linkedlistStack.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 19 21:00:38 2019\r\n\r\n@author: [email protected]\r\n\r\nStack Implementation based on LinkedList\r\n\r\nInsertion and Deletion only 
happend at start\r\n\r\n\"\"\"\r\nclass Node:\r\n def __init__(self,data,next_p=None):\r\n self.data = data\r\n self.next_p = next_p\r\n \r\n def set_data(self,data):\r\n self.data = data\r\n def get_data(self):\r\n return self.data\r\n \r\n def set_next(self,next_p):\r\n self.next_p = next_p\r\n def get_next(self):\r\n return self.next_p\r\n \r\n def has_next(self):\r\n return self.next_p!= None\r\n\r\nclass StackLinkedList:\r\n def __init__(self,head=None):\r\n self.head = head\r\n self.length = 0\r\n \r\n def push(self,data):\r\n new_node = Node(data)\r\n if self.length == 0:\r\n self.head = new_node\r\n new_node.next_p = None\r\n else:\r\n new_node.next_p = self.head\r\n self.head = new_node\r\n self.length +=1\r\n \r\n def pop(self):\r\n if self.head == None:\r\n print(\"stack underflow\")\r\n else:\r\n current = self.head\r\n self.head = current.get_next()\r\n current.set_next(None)\r\n \r\n def peek(self):\r\n if self.head == None:\r\n print(\"Stack Underflow\")\r\n else:\r\n print(\"top data is: \", self.head.get_data())\r\n return self.head.get_data()\r\n \r\n def print_stack(self):\r\n current = self.head\r\n while current is not None:\r\n print(\"stack: \", current.get_data())\r\n current = current.get_next()\r\n\r\nif __name__==\"__main__\":\r\n stack = StackLinkedList()\r\n stack.pop()\r\n stack.peek()\r\n stack.push(2)\r\n stack.push(3)\r\n stack.push(4)\r\n stack.print_stack()\r\n stack.pop()\r\n stack.print_stack()\r\n stack.peek()\r\n " }, { "alpha_fraction": 0.4741823077201843, "alphanum_fraction": 0.4839780926704407, "avg_line_length": 27.735000610351562, "blob_id": "48d13a0b8c83ad4fb5421acec3428a6c66e7fd84", "content_id": "5674a51a5e56b0d11adec151b8e0418d2bd502c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6023, "license_type": "no_license", "max_line_length": 71, "num_lines": 200, "path": "/LinkedList.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", 
"src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 12 00:04:38 2019\r\n\r\n@author: [email protected]\r\n\r\nHere we are going to perform all operation related to Singly\r\nLinked List.\r\n1) Insertion\r\n2) Deletion\r\n3) Traversing and Count\r\n4) Reverse the linked list.\r\n5) Searching\r\n\"\"\"\r\nclass Node:\r\n # Default constructor\r\n def __init__(self):\r\n self.data = None\r\n self.next_p = None\r\n # set next pointer\r\n def set_next(self,next_p):\r\n self.next_p = next_p\r\n # get next pointer\r\n def get_next(self):\r\n return self.next_p\r\n \r\n #set data\r\n def set_data(self,data):\r\n self.data = data\r\n #get data\r\n def get_data(self):\r\n return self.data\r\n #return true if node points to next pointer\r\n def has_next(self):\r\n return self.next_p != None\r\n\r\n\r\nclass LinkedList:\r\n def __init__(self,head=None):\r\n self.length=0\r\n self.head = head\r\n \r\n def insert_at_beginning(self,data):\r\n new_node = Node()\r\n new_node.set_data(data)\r\n if self.length == 0:\r\n self.head = new_node\r\n else:\r\n new_node.set_next(self.head)\r\n self.head = new_node\r\n self.length +=1\r\n \r\n def insert_at_end(self,data):\r\n new_node = Node()\r\n new_node.set_data(data)\r\n if self.length == 0:\r\n self.head = new_node\r\n else:\r\n current = self.head\r\n while current.get_next() != None:\r\n current= current.get_next()\r\n current.set_next(new_node)\r\n self.length +=1\r\n \r\n def insert_at_position(self,data,pos):\r\n if ((pos <= 0) | (pos > self.length)):\r\n raise ValueError(\"Check Position value\")\r\n elif ((self.length == 0) | (pos==1)):\r\n self.insert_at_beginning(data)\r\n elif pos == self.length:\r\n self.insert_at_end(data)\r\n else:\r\n new_node = Node()\r\n new_node.set_data(data)\r\n count = 1\r\n current = self.head\r\n while count != pos-1:\r\n count +=1\r\n current = current.get_next()\r\n new_node.set_next(current.get_next())\r\n current.set_next(new_node)\r\n self.length +=1\r\n 
\r\n def delete_at_start(self):\r\n if self.length == 0:\r\n raise ValueError(\"Linked is Empty\")\r\n else:\r\n current = self.head\r\n self.head = self.head.get_next()\r\n current.set_next(None)\r\n self.length -= 1\r\n \r\n def delete_at_pos(self,pos):\r\n if self.length == 0:\r\n raise ValueError(\"List is Empty\")\r\n elif ((pos <= 0) | (pos > self.length)):\r\n raise ValueError(\"Check the value of position\")\r\n elif pos == 1:\r\n self.delete_at_start()\r\n elif pos == self.length:\r\n self.delete_at_end()\r\n else:\r\n count =1\r\n previous = self.head\r\n current = self.head\r\n while count < pos:\r\n count +=1\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(current.get_next())\r\n self.length -=1\r\n \r\n \r\n \r\n def delete_at_end(self):\r\n if self.length == 0:\r\n raise ValueError(\"List is Empty\")\r\n else:\r\n previous = self.head\r\n current = self.head.get_next()\r\n while current.get_next() != None:\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(None)\r\n self.length -= 1\r\n\r\n\r\n def delete_node(self,data):\r\n if self.length == 0:\r\n raise ValueError(\"List is EMpty\")\r\n else:\r\n previous = None\r\n current = self.head\r\n found = False\r\n while not found:\r\n if current.get_data() == data:\r\n found = True\r\n elif current is None:\r\n print(\"Node not found\")\r\n break\r\n else:\r\n previous = current\r\n current = current.get_next()\r\n if previous is None:\r\n self.head = current.get_next()\r\n else:\r\n previous.set_next(current.get_next())\r\n self.length -=1\r\n\r\n \r\n def searching_node_get_position(self,data):\r\n if self.length == 0:\r\n raise ValueError(\"List is Empty\")\r\n else:\r\n current = self.head\r\n found = False\r\n count = 1\r\n while not found:\r\n if current.get_data() == data:\r\n print(\"given node data: \",current.get_data(),count)\r\n found = True\r\n elif current is None:\r\n raise ValueError(\"Node not found\")\r\n else:\r\n current = 
current.get_next()\r\n count +=1\r\n\r\n\r\n#count number of element and print their value\r\ndef traversing_count(linkedlist):\r\n count = linkedlist.length\r\n current = linkedlist.head\r\n while current is not None:\r\n print(\"Data is: \"+str(current.get_data()))\r\n current = current.get_next()\r\n print(\"count is: \"+str(count))\r\n\r\ndef reverse_linked_list(linkedlist):\r\n prev = None\r\n next_p = None\r\n current = linkedlist.head\r\n while (current is not None):\r\n next_p = current.get_next()\r\n current.set_next(prev)\r\n prev = current\r\n current = next_p\r\n linkedlist.head = prev\r\n\r\nif __name__ ==\"__main__\":\r\n raj = LinkedList()\r\n raj.insert_at_end(10)\r\n raj.insert_at_position(20,1)\r\n raj.insert_at_end(2)\r\n raj.insert_at_end(30)\r\n raj.insert_at_end(60)\r\n raj.insert_at_end(70)\r\n raj.insert_at_end(80)\r\n traversing_count(raj)\r\n reverse_linked_list(raj)\r\n traversing_count(raj)\r\n \r\n\r\n \r\n \r\n \r\n\r\n \r\n \r\n" }, { "alpha_fraction": 0.48724761605262756, "alphanum_fraction": 0.4962805509567261, "avg_line_length": 27.44881820678711, "blob_id": "5efe6d19820d3fa3f84ed6c5f67b647b31485b97", "content_id": "0cd1d4da808e9caba543e418b2e42afec09c660b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3764, "license_type": "no_license", "max_line_length": 58, "num_lines": 127, "path": "/CircularLinkedList.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 14 08:36:31 2019\r\n\r\n@author: [email protected]\r\n\r\nOperation we performed here is:\r\n 1) Insertion\r\n 2) Deletion\r\n 3) COunting\r\n 4) Traversing\r\n\"\"\"\r\nclass Node:\r\n def __init__(self,data=None,next_p=None):\r\n self.data = data\r\n self.next_p = next_p\r\n \r\n def set_next(self,next_p):\r\n self.next_p = next_p\r\n def get_next(self):\r\n return self.next_p\r\n \r\n def 
set_data(self,data):\r\n self.data = data\r\n def get_data(self):\r\n return self.data\r\n \r\n def has_next(self):\r\n return self.next_p != None\r\n\r\nclass CircularLL:\r\n def __init__(self,head=None):\r\n self.head = head\r\n self.length =0\r\n \r\n def insert_at_start(self,data):\r\n new_node = Node(data)\r\n if self.length ==0:\r\n self.head = new_node\r\n new_node.set_next(new_node)\r\n else:\r\n new_node.set_next(new_node)\r\n current = self.head.get_next()\r\n while current.get_next() != self.head:\r\n current = current.get_next()\r\n current.set_next(new_node)\r\n new_node.set_next(self.head)\r\n self.head = new_node\r\n self.length +=1\r\n \r\n def insert_at_end(self,data):\r\n new_node = Node(data)\r\n if self.length == 0:\r\n self.head = new_node\r\n new_node.set_next(new_node)\r\n else:\r\n new_node.set_next(new_node)\r\n current = self.head.get_next()\r\n while current.get_next() != self.head:\r\n current = current.get_next()\r\n current.set_next(new_node)\r\n new_node.set_next(self.head)\r\n self.length +=1\r\n \r\n def delete_at_start(self):\r\n if self.length == 0:\r\n raise ValueError(\"List is Empty\")\r\n else:\r\n temp = self.head\r\n current = self.head.get_next()\r\n while current.get_next() != self.head:\r\n current = current.get_next()\r\n current.set_next(temp.get_next())\r\n self.head = temp.get_next()\r\n temp.set_next(None)\r\n self.length -= 1\r\n \r\n \r\n def delete_at_end(self):\r\n if self.length ==0:\r\n raise ValueError(\"List is EMpty\")\r\n else:\r\n current = self.head.get_next()\r\n previous = self.head.get_next()\r\n while current.get_next() != self.head:\r\n previous = current\r\n current = current.get_next()\r\n previous.set_next(self.head)\r\n current.set_next(None)\r\n self.length -=1\r\n \r\n #here in reverse logic i am not changing head node\r\n#sample input A--> B --> C --> D --> back to A\r\n#sample output A --> D --> C --> B --> back to A \r\n def reverse(self):\r\n current = self.head.get_next()\r\n prev = 
self.head\r\n next_p = None\r\n while current != self.head:\r\n next_p = current.get_next()\r\n current.set_next(prev)\r\n prev = current\r\n current = next_p\r\n self.head.set_next(prev)\r\n \r\n \r\n \r\ndef traversing_count(clist):\r\n count = clist.length\r\n print(\"Count is: \"+str(count))\r\n temp = clist.head\r\n while count != 0:\r\n print(\"Data is: \"+str(temp.get_data()))\r\n temp= temp.get_next()\r\n count -= 1\r\n\r\nif __name__ == \"__main__\":\r\n clist = CircularLL()\r\n clist.insert_at_end(2)\r\n clist.insert_at_start(3)\r\n clist.insert_at_start(4)\r\n clist.insert_at_start(5)\r\n clist.insert_at_start(6)\r\n clist.insert_at_start(7)\r\n traversing_count(clist)\r\n clist.reverse()\r\n traversing_count(clist)\r\n \r\n\r\n\r\n \r\n " }, { "alpha_fraction": 0.48573756217956543, "alphanum_fraction": 0.5028524994850159, "avg_line_length": 21.365385055541992, "blob_id": "b5143398b2f2ae7501aae9c16d4b7d2bcbf72f9e", "content_id": "3386c2dd27e53f3b4b28501b9bb595804b90df08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1227, "license_type": "no_license", "max_line_length": 47, "num_lines": 52, "path": "/arrayStack.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 19 20:03:23 2019\r\n\r\n@author: [email protected]\r\n\r\nSTack implementation using Array\r\n\"\"\"\r\n\r\nclass StackArray:\r\n def __init__(self,limit=10):\r\n self.limit = limit\r\n self.arr = []\r\n \r\n def push(self,data):\r\n if len(self.arr) >= self.limit:\r\n raise ValueError(\"Stack Overflow\")\r\n else:\r\n self.arr.append(data)\r\n print(\"Stack After Push: \", self.arr)\r\n \r\n def is_empty(self):\r\n if len(self.arr) == 0:\r\n return True\r\n \r\n def pop(self):\r\n if self.is_empty():\r\n raise ValueError(\"Stack Underflow\")\r\n else:\r\n return self.arr.pop()\r\n \r\n def peek(self):\r\n if self.is_empty():\r\n 
raise ValueError(\"Stack Underflow\")\r\n else:\r\n return self.arr[-1]\r\n \r\n def stack_size(self):\r\n return len(self.arr)\r\n \r\n def print_stack(self):\r\n print(self.arr)\r\n\r\nif __name__ == \"__main__\":\r\n stack_one = StackArray(5)\r\n print(stack_one.is_empty())\r\n stack_one.push(5)\r\n stack_one.push(6)\r\n stack_one.push(7)\r\n print(stack_one.peek())\r\n stack_one.pop()\r\n stack_one.print_stack()\r\n \r\n \r\n" }, { "alpha_fraction": 0.4926624596118927, "alphanum_fraction": 0.5083857178688049, "avg_line_length": 25.68115997314453, "blob_id": "3d3e33edb3ad498cf64aed2e031b2f997cd31fe5", "content_id": "651bb479d714439457d1ad3674f769cbc25967ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1908, "license_type": "no_license", "max_line_length": 79, "num_lines": 69, "path": "/inTopost.py", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 19 21:36:42 2019\r\n\r\n@author: [email protected]\r\n\r\nInfix to Postfix Expression Conversion using Stack\r\n\r\nfor this we have create a table in which operational precedence should be\r\ndefined\r\n\"\"\"\r\n\r\nclass stack:\r\n def __init__(self):\r\n self.arr = []\r\n \r\n def push(self,data):\r\n self.arr.append(data)\r\n \r\n def pop(self):\r\n return self.arr.pop()\r\n \r\n def peek(self):\r\n return self.arr[-1]\r\n \r\n def __str__(self):\r\n return str(self.arr)\r\n def is_empty(self):\r\n if len(self.arr) == 0:\r\n return True\r\n else:\r\n return False\r\n\r\ndef infix_to_postfix_conversion(string):\r\n \"\"\" First we set the precedence of operator and then implement \r\n algorithm \"\"\"\r\n pred = {}\r\n pred['*'] = 3\r\n pred['/'] = 3\r\n pred['+'] = 2\r\n pred['-'] = 2\r\n pred['('] = 1\r\n \r\n stack_output = stack()\r\n postfixList = []\r\n token = string.split()\r\n for i in token:\r\n if i in 
\"QWERASDFZXCVTYUIOPGHJKLBNM0123456789\":\r\n postfixList.append(i)\r\n elif i == '(':\r\n stack_output.push(i)\r\n elif i == \")\":\r\n topi = stack_output.pop()\r\n while topi != \"(\":\r\n postfixList.append(topi)\r\n topi = stack_output.pop()\r\n else:\r\n while (not stack_output.is_empty() and\r\n pred[stack_output.peek()] >= pred[i]):\r\n postfixList.append(stack_output.pop())\r\n stack_output.push(i)\r\n while not stack_output.is_empty():\r\n postfixList.append(stack_output.pop())\r\n return \"\".join(postfixList)\r\n\r\nif __name__ == \"__main__\":\r\n print(infix_to_postfix_conversion(\"A + B + C * D\"))\r\n print(infix_to_postfix_conversion(\"( A + B ) * C - ( D - E ) * ( F + G )\"))\r\n print(infix_to_postfix_conversion(\"( A + B + C * ( D * E / F ) )\"))" }, { "alpha_fraction": 0.7905759215354919, "alphanum_fraction": 0.7979057431221008, "avg_line_length": 44.47618865966797, "blob_id": "961860cef0b0dae72d3892f1bb63d95e3ed50f79", "content_id": "b0e06df892fe8736cc22c4ee9025daa4218a4dd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 955, "license_type": "no_license", "max_line_length": 212, "num_lines": 21, "path": "/README.md", "repo_name": "Raj-Kumar2208/linkedList-Stacks-Queues-Hashing", "src_encoding": "UTF-8", "text": "# linkedList-Stacks-Queues-Hashing\nProgramming Concepts Questions\n\n\nIn this repository, you will find basic implementation of linked list, Stacks, Queues and Hashing.\n\n### Linked List\nIn Linked List, we have implemented few of basic implementations such as\n1) Singly Linked List\n2) Doubly Linked List\n3) Circular Linked List\n4) Skip List\n\nEach Linked list have following methods such as Insertion, Deletion, Searching, and Traversing. You can refer to python code. In case you want us to implement something more than this. 
Kindly email us at [email protected].\nWould like to help other fellow, Thanks\n\n### Stacks\nIn stacks, following are\n1) Stack implementation based on Array (in python we have used list data structure for array implementation instead of numpy.array)\n2) Stack implementation based on Dynamic Array. (here dynamic array means, we are using array doubling technique, to optimize above implementation.\n3) Stack implementation based on Linked List.\n" } ]
8
5tf/colours_calibration
https://github.com/5tf/colours_calibration
1fbd0b3e62ceb0bdd7bfad64731b580f7bf3b41d
1bb639b497371022fbf67e1e3fb4dae84a50172d
3206900448b9257beff473d9c2357c43c2c1836f
refs/heads/master
2022-11-10T04:27:33.123425
2020-06-22T07:29:03
2020-06-22T07:29:03
274,043,383
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.597122311592102, "alphanum_fraction": 0.6906474828720093, "avg_line_length": 33.75, "blob_id": "e1bb261d61668528104b18c1413370066fe2367b", "content_id": "d1d61ee19631fa1d8b2360f54bde47fb310b75a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 50, "num_lines": 4, "path": "/CalibrationTestPOST.py", "repo_name": "5tf/colours_calibration", "src_encoding": "UTF-8", "text": "import requests\nurl = 'http://127.0.0.1:5000/calibrate_colours'\ndata = open('image.jpg', 'rb').read()\nr = requests.post(url, data=data)\n" }, { "alpha_fraction": 0.46662047505378723, "alphanum_fraction": 0.5310695171356201, "avg_line_length": 35.378150939941406, "blob_id": "fc355498e4d9c6134be4ffca0993d1068da118fe", "content_id": "e0b6c2ba6b58bb628e645a1196b9519aa91ef381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4329, "license_type": "no_license", "max_line_length": 80, "num_lines": 119, "path": "/CalibrationTest.py", "repo_name": "5tf/colours_calibration", "src_encoding": "UTF-8", "text": "from flask import Flask, request\nimport numpy as np\nfrom plantcv import plantcv as pcv\nimport cv2\n\napp = Flask(__name__)\n\n\ndef apply_transformation_matrix(source_img, transformation_matrix):\n\n # split transformation_matrix\n red, green, blue, red2, green2, blue2, red3, green3, blue3 = np.split(\n transformation_matrix, 9, 1)\n\n # find linear, square, and cubic values of source_img color channels\n source_b, source_g, source_r = cv2.split(source_img)\n source_b2 = np.square(source_b)\n source_b3 = np.power(source_b, 3)\n source_g2 = np.square(source_g)\n source_g3 = np.power(source_g, 3)\n source_r2 = np.square(source_r)\n source_r3 = np.power(source_r, 3)\n\n # apply linear model to source color channels\n b = 0 + source_r * blue[0] + source_g * blue[1] + source_b * blue[\n 2] + source_r2 * blue[3] + 
source_g2 * blue[\n 4] + source_b2 * blue[5] + source_r3 * blue[6] + source_g3 * blue[\n 7] + source_b3 * blue[8]\n g = 0 + source_r * green[0] + source_g * green[1] + source_b * green[\n 2] + source_r2 * green[3] + source_g2 * green[\n 4] + source_b2 * green[5] + source_r3 * green[6] + source_g3 * \\\n green[7] + source_b3 * green[8]\n r = 0 + source_r * red[0] + source_g * red[1] + source_b * red[\n 2] + source_r2 * red[3] + source_g2 * red[\n 4] + source_b2 * red[5] + source_r3 * red[6] + source_g3 * red[\n 7] + source_b3 * red[8]\n\n # merge corrected color channels onto source_image\n bgr = [b, g, r]\n corrected_img = cv2.merge(bgr)\n\n # round corrected_img elements to be within range and of the correct data\n # type\n corrected_img = np.rint(corrected_img)\n corrected_img[np.where(corrected_img > 255)] = 255\n corrected_img = corrected_img.astype(np.uint8)\n\n # return corrected_img\n return corrected_img\n\n\[email protected]('/calibrate_colours', methods=['POST'])\ndef calibrate_colours():\n byte_img = request.data\n np_arr = np.fromstring(byte_img, dtype='uint8')\n rgb_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n df, start, space = pcv.transform.find_color_card(rgb_img=rgb_img)\n mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=10,\n start_coord=start,\n spacing=space, ncols=6, nrows=4)\n\n headers, color_matrix = pcv.transform.get_color_matrix(rgb_img, mask)\n print(headers)\n print(color_matrix)\n\n xrite_values = [115., 82., 68.,\n 194., 150., 130.,\n 98., 122., 157.,\n 87., 108., 67.,\n 133., 128., 177.,\n 103., 189., 170.,\n 214., 126., 44.,\n 80., 91., 166.,\n 193., 90., 99.,\n 94., 60., 108.,\n 157., 188., 64.,\n 224., 163., 46.,\n 56., 61., 150.,\n 70., 148., 73.,\n 175., 54., 60.,\n 231., 199., 31.,\n 187., 86., 149.,\n 8., 133., 161.,\n 243., 243., 242.,\n 200., 200., 200.,\n 160., 160., 160.,\n 122., 122., 121.,\n 85., 85., 85.,\n 52., 52., 52.]\n\n target_color_matrix = np.zeros((len(np.unique(mask)) - 1, 4))\n row_counter 
= 0\n for i in np.unique(mask):\n if i != 0:\n target_color_matrix[row_counter][0] = i\n target_color_matrix[row_counter][1] = xrite_values[row_counter]\n target_color_matrix[row_counter][2] = xrite_values[row_counter + 1]\n target_color_matrix[row_counter][3] = xrite_values[row_counter + 2]\n row_counter += 1\n\n print(target_color_matrix)\n\n matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(\n target_color_matrix,\n color_matrix)\n\n print(\"Moore-Penrose Inverse Matrix: \")\n print(matrix_m)\n\n deviance, transformation_matrix = pcv.transform.calc_transformation_matrix(\n matrix_m, matrix_b)\n\n corrected_img = apply_transformation_matrix(rgb_img, transformation_matrix)\n\n pcv.print_image(corrected_img, 'corrected.jpg')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.7743055820465088, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 37.400001525878906, "blob_id": "fab30de6c60c2f4573a04dc113f84f28086e8783", "content_id": "f4113097aa24d6a234db9699543c7f1c94d41cc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 583, "license_type": "no_license", "max_line_length": 145, "num_lines": 15, "path": "/README.md", "repo_name": "5tf/colours_calibration", "src_encoding": "UTF-8", "text": "# colours_calibration\n\n<b>CalibrationTest.py</b> - jednofunkcyjny serwer\n\n<b>CalibrationTestPOST.py</b> - skrypt przesyล‚ajฤ…cy zapytanie z obrazem\n\n<b>color-chart-v10.jpg</b> - dokument dotyczฤ…cy kolorรณw uzyskiwanych w reakcjach\n\n<b>corrected.jpg</b> - obraz po transformacji\n\n<b>image.jpg</b> - zmierzony obraz z wzorcem XRite\n\n<a href=\"https://plantcv.readthedocs.io/en/latest/transform_correct_color/\">Dokumentacja PlantCV</a>\n\n<a href=\"https://github.com/danforthcenter/plantcv/blob/master/plantcv/plantcv/transform/color_correction.py\">Kod ลบrรณdล‚owy biblioteki PlantCV</a>\n" } ]
3
davecliff/BristolStockExchange
https://github.com/davecliff/BristolStockExchange
878adcd746c5137ab421b76e136292bc37520374
15481a617c39035636a737fb2440b9f0c5ea1f78
25e21a3702dbf2dcda34a4872c2cb986a6bf8fea
refs/heads/master
2023-08-03T17:14:31.582935
2023-07-20T09:53:51
2023-07-20T09:53:51
6,364,335
288
123
NOASSERTION
2012-10-24T04:01:21
2022-11-29T05:57:13
2022-12-06T19:38:02
Python
[ { "alpha_fraction": 0.5454613566398621, "alphanum_fraction": 0.5598057508468628, "avg_line_length": 45.12413787841797, "blob_id": "67988fd8e7455ca2df5638278d672cfae1871099", "content_id": "6709e4cc44f02ef4d033da8b1a94b5fa854853fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13385, "license_type": "permissive", "max_line_length": 189, "num_lines": 290, "path": "/Trader_AA.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "'''\nCreated on 1 Dec 2012\n\n@author: Ash Booth\n\nAA order execution strategy as described in: \"Perukrishnen, Cliff and Jennings (2008) \n'Strategic Bidding in Continuous Double Auctions'. Artificial Intelligence Journal, \n172, (14), 1700-1729\".\n\n With notable...\n Amendments:\n - slightly modified equilibrium price updating\n - spin up period instead of rounds\n\n Additions:\n - Includes functions for using Newton-Rhapson method for finding \n complementary theta values.\n\n'''\nimport math\nimport random\n\nclass Trader_AA(object):\n\n def __init__(self):\n\n # External parameters (you must choose [optimise] values yourselves)\n self.spin_up_time = 20\n self.eta = 3.0\n self.theta_max = 2.0\n self.theta_min = -8.0\n self.lambda_a = 0.01\n self.lambda_r = 0.02\n self.beta_1 = 0.4\n self.beta_2 = 0.4\n self.gamma = 2.0\n self.nLastTrades = 5 # N in AIJ08\n self.ema_param = 2 / float(self.nLastTrades + 1)\n self.maxNewtonItter = 10\n self.maxNewtonError = 0.0001\n \n # The order we're trying to trade\n self.orders = []\n self.limit = None\n self.active = False\n self.job = None\n \n # Parameters describing what the market looks like and it's contstraints\n self.marketMax = bse_sys_maxprice\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n \n # Internal parameters (spin up time need to get values for some of these)\n self.eqlbm = None\n self.theta = -1.0 * (5.0 * 
random.random())\n self.smithsAlpha = None\n self.lastTrades = []\n self.smithsAlphaMin = None\n self.smithsAlphaMax = None\n \n self.aggressiveness_buy = -1.0 * (0.3 * random.random())\n self.aggressiveness_sell = -1.0 * (0.3 * random.random())\n self.target_buy = None\n self.target_sell = None\n\n def updateEq(self, price):\n # Updates the equilibrium price estimate using EMA\n if self.eqlbm == None: self.eqlbm = price\n else: self.eqlbm = self.ema_param * price + (1 - self.ema_param) * self.eqlbm\n \n def newton4Buying(self):\n # runs Newton-Raphson to find theta_est (the value of theta that makes the 1st \n # derivative of eqn(3) continuous)\n theta_est = self.theta\n rightHside = ((self.theta * (self.limit - self.eqlbm)) / float(math.exp(self.theta) - 1));\n i = 0\n while i <= self.maxNewtonItter:\n eX = math.exp(theta_est)\n eXminOne = eX - 1\n fofX = (((theta_est * self.eqlbm) / float(eXminOne)) - rightHside)\n if abs(fofX) <= self.maxNewtonError:\n break\n dfofX = ((self.eqlbm / eXminOne) - ((eX * self.eqlbm * theta_est) / float(eXminOne * eXminOne)))\n theta_est = (theta_est - (fofX / float(dfofX)));\n i += 1\n if theta_est == 0.0: theta_est += 0.000001\n return theta_est\n \n def newton4Selling(self):\n # runs Newton-Raphson to find theta_est (the value of theta that makes the 1st \n # derivative of eqn(4) continuous)\n theta_est = self.theta\n rightHside = ((self.theta * (self.eqlbm - self.limit)) / float(math.exp(self.theta) - 1))\n i = 0\n while i <= self.maxNewtonItter:\n eX = math.exp(theta_est)\n eXminOne = eX - 1\n fofX = (((theta_est * (self.marketMax - self.eqlbm)) / float(eXminOne)) - rightHside)\n if abs(fofX) <= self.maxNewtonError:\n break\n dfofX = (((self.marketMax - self.eqlbm) / eXminOne) - ((eX * (self.marketMax - self.eqlbm) * theta_est) / float(eXminOne * eXminOne)))\n theta_est = (theta_est - (fofX / float(dfofX)))\n i += 1\n if theta_est == 0.0: theta_est += 0.000001\n return theta_est\n \n def updateTarget(self):\n # relates to eqns 
(3),(4),(5) and (6)\n # For buying\n if self.limit < self.eqlbm:\n # Extra-marginal buyer\n if self.aggressiveness_buy >= 0: target = self.limit\n else: target = self.limit * (1 - (math.exp(-self.aggressiveness_buy * self.theta) - 1) / float(math.exp(self.theta) - 1))\n self.target_buy = target\n else:\n # Intra-marginal buyer\n if self.aggressiveness_buy >= 0: target = (self.eqlbm + (self.limit - self.eqlbm) * ((math.exp(self.aggressiveness_buy * self.theta) - 1) / float(math.exp(self.theta) - 1)))\n else:\n theta_est = self.newton4Buying()\n target = self.eqlbm * (1 - (math.exp(-self.aggressiveness_buy * theta_est) - 1) / float(math.exp(theta_est) - 1))\n self.target_buy = target\n # For selling\n if self.limit > self.eqlbm:\n # Extra-marginal seller\n if self.aggressiveness_sell >= 0: target = self.limit\n else: target = self.limit + (self.marketMax - self.limit) * ((math.exp(-self.aggressiveness_sell * self.theta) - 1) / float(math.exp(self.theta) - 1))\n self.target_sell = target\n else:\n # Intra-marginal seller\n if self.aggressiveness_sell >= 0: target = self.limit + (self.eqlbm - self.limit) * (1 - (math.exp(self.aggressiveness_sell * self.theta) - 1) / float(math.exp(self.theta) - 1))\n else:\n theta_est = self.newton4Selling() \n target = self.eqlbm + (self.marketMax - self.eqlbm) * ((math.exp(-self.aggressiveness_sell * theta_est) - 1) / (math.exp(theta_est) - 1))\n self.target_sell = target\n \n def calcRshout(self, target, buying):\n if buying:\n # Are we extramarginal?\n if self.eqlbm >= self.limit:\n r_shout = 0.0\n else: # Intra-marginal\n if target > self.eqlbm:\n if target > self.limit: target = self.limit\n r_shout = math.log((((target - self.eqlbm) * (math.exp(self.theta) - 1)) / (self.limit - self.eqlbm)) + 1) / self.theta\n else: # other formula for intra buyer\n r_shout = math.log((1 - (target / self.eqlbm)) * (math.exp(self.newton4Buying()) - 1) + 1) / -self.newton4Buying()\n else: # Selling\n # Are we extra-marginal?\n if self.limit >= 
self.eqlbm:\n r_shout = 0.0\n else: # Intra-marginal\n if target > self.eqlbm:\n r_shout = math.log(((target - self.eqlbm) * (math.exp(self.newton4Selling()) - 1)) / (self.marketMax - self.eqlbm) + 1) / -self.newton4Selling()\n else: # other intra seller formula\n if target < self.limit: target = self.limit\n r_shout = math.log((1 - (target - self.limit) / (self.eqlbm - self.limit)) * (math.exp(self.theta) - 1) + 1) / self.theta\n return r_shout\n \n def updateAgg(self, up, buying, target):\n if buying:\n old_agg = self.aggressiveness_buy \n else:\n old_agg = self.aggressiveness_sell\n if up:\n delta = (1 + self.lambda_r) * self.calcRshout(target, buying) + self.lambda_a\n else:\n delta = (1 - self.lambda_r) * self.calcRshout(target, buying) - self.lambda_a\n new_agg = old_agg + self.beta_1 * (delta - old_agg)\n if new_agg > 1.0: new_agg = 1.0\n elif new_agg < 0.0: new_agg = 0.000001\n return new_agg\n \n def updateSmithsAlpha(self, price):\n self.lastTrades.append(price)\n if not (len(self.lastTrades) <= self.nLastTrades): self.lastTrades.pop(0)\n self.smithsAlpha = math.sqrt(sum(((p - self.eqlbm) ** 2) for p in self.lastTrades) * (1 / float(len(self.lastTrades)))) / self.eqlbm\n if self.smithsAlphaMin == None:\n self.smithsAlphaMin = self.smithsAlpha\n self.smithsAlphaMax = self.smithsAlphaMax\n else:\n if self.smithsAlpha < self.smithsAlphaMin: self.smithsAlphaMin = self.smithsAlpha\n if self.smithsAlpha > self.smithsAlphaMax: self.smithsAlphaMax = self.smithsAlpha\n \n def updateTheta(self):\n alphaBar = (self.smithsAlpha - self.smithsAlphaMin) / (self.smithsAlphaMax - self.smithsAlphaMin)\n desiredTheta = (self.theta_max - self.theta_min) * (1 - (alphaBar * math.exp(self.gamma * (alphaBar - 1)))) + self.theta_min\n theta = self.theta + self.beta_2 * (desiredTheta - self.theta)\n if theta == 0: theta += 0.0000001\n self.theta = theta\n \n def getorder(self, time, countdown, lob):\n if len(self.orders) < 1:\n self.active = False\n order = None\n else:\n 
self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].otype\n self.updateTarget()\n if self.job == 'Bid':\n # currently a buyer (working a bid order)\n if self.spin_up_time > 0:\n ask_plus = (1 + self.lambda_r) * self.prev_best_ask_p + self.lambda_a\n quoteprice = self.prev_best_bid_p + (min(self.limit, ask_plus) - self.prev_best_bid_p) / self.eta\n else:\n quoteprice = self.prev_best_bid_p + (self.target - self.prev_best_bid_p) / self.eta\n else:\n # currently a seller (working a sell order)\n if self.spin_up_time > 0:\n bid_minus = (1 - self.lambda_r) * self.prev_best_bid_p - self.lambda_a\n quoteprice = self.prev_best_ask_p - (self.prev_best_ask_p - max(self.limit, bid_minus)) / self.eta\n else:\n quoteprice = (self.prev_best_ask_p - (self.prev_best_ask_p - self.target) / self.eta)\n \n order = Order(self.tid, self.job, quoteprice, self.orders[0].qty, time)\n \n return order \n \n \n def respond(self, time, lob, trade, verbose):\n # what, if anything, has happened on the bid LOB?\n bid_improved = False\n bid_hit = False\n lob_best_bid_p = lob['bids']['best']\n lob_best_bid_q = None\n if lob_best_bid_p != None:\n # non-empty bid LOB\n lob_best_bid_q = lob['bids']['lob'][-1][1]\n if self.prev_best_bid_p < lob_best_bid_p :\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\n # previous best bid was hit\n bid_hit = True\n elif self.prev_best_bid_p != None:\n # the bid LOB has been emptied by a hit\n bid_hit = True\n\n # what, if anything, has happened on the ask LOB?\n ask_improved = False\n ask_lifted = False\n lob_best_ask_p = lob['asks']['best']\n lob_best_ask_q = None\n if lob_best_ask_p != None:\n # non-empty ask LOB\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if self.prev_best_ask_p > lob_best_ask_p :\n # best ask has improved 
-- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\n ask_lifted = True\n elif self.prev_best_ask_p != None:\n # the bid LOB is empty now but was not previously, so must have been hit\n ask_lifted = True\n\n if verbose and (bid_improved or bid_hit or ask_improved or ask_lifted):\n print ('B_improved', bid_improved, 'B_hit', bid_hit, 'A_improved', ask_improved, 'A_lifted', ask_lifted)\n\n deal = bid_hit or ask_lifted\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_ask_p = lob_best_ask_p\n \n \n if self.spin_up_time > 0: self.spin_up_time -= 1\n if deal:\n price = trade['price']\n self.updateEq(price)\n self.updateSmithsAlpha(price)\n self.updateTheta()\n \n # The lines below represent the rules in fig(7) in AIJ08. 
The if statements have not\n # been merged for the sake of clarity.\n \n # For buying\n if deal:\n if self.target >= price: \n self.aggressiveness_buy = self.updateAgg(False, True, price)\n else: self.aggressiveness_buy = self.updateAgg(True, True, price)\n elif bid_improved and (self.target <= price): self.aggressiveness_buy = self.updateAgg(True, True, self.prev_best_bid_p)\n # For selling\n if deal:\n if self.target <= price: self.aggressiveness_sell = self.updateAgg(False, False, price)\n else: self.aggressiveness_sell = self.updateAgg(True, False, price)\n elif ask_improved and (self.target >= price): self.aggressiveness_sell = self.updateAgg(True, False, self.prev_best_ask_p)\n \n self.updateTarget()\n \n" }, { "alpha_fraction": 0.3989556133747101, "alphanum_fraction": 0.4157702326774597, "avg_line_length": 27.909374237060547, "blob_id": "e456ad3e61e73fb77bbb0d01c57d6dca0293f7a0", "content_id": "27b68a3bf98f6152d65db254e5c6383798098be2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9575, "license_type": "permissive", "max_line_length": 118, "num_lines": 320, "path": "/ZhenZhang/source/ZZISHV.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\nfrom BSE2_msg_classes import Assignment, Order, Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\n\r\nclass Trader_ZZISHV(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time,m):\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n self.limit = None\r\n self.job = None\r\n\r\n # variable for MLOFI\r\n self.last_lob = None;\r\n self.es_list = [];\r\n self.ds_list = [];\r\n\r\n #variable for ratio\r\n self.bids_volume_list = []\r\n self.asks_volume_list = []\r\n\r\n # m\r\n self.m = m;\r\n\r\n\r\n\r\n def 
is_imbalance_significant(self, m,threshold):\r\n cb_list = [0 for i in range(m)]\r\n ab_list = []\r\n\r\n ca_list = [0 for i in range(m)]\r\n aa_list = []\r\n\r\n n = 1\r\n\r\n while len(self.bids_volume_list) >= n and len(self.asks_volume_list) >= n:\r\n for i in range(m):\r\n cb_list[i] += self.bids_volume_list[-n]['level' + str(i + 1)]\r\n ca_list[i] += self.asks_volume_list[-n]['level' + str(i + 1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n\r\n for i in range(m):\r\n temp1 = None\r\n temp2 = None\r\n if n == 1:\r\n temp1 = cb_list[i] + 1\r\n temp2 = ca_list[i] + 1\r\n else:\r\n temp1 = cb_list[i] / (n - 1) + 1\r\n temp2 = ca_list[i] / (n - 1) + 1\r\n ab_list.append(temp1)\r\n aa_list.append(temp2)\r\n\r\n v_bid = 0;\r\n v_ask = 0;\r\n for i in range(m):\r\n v_bid += math.exp(-0.5*i)*ab_list[i];\r\n v_ask += math.exp(-0.5*i)*aa_list[i];\r\n ratio = (v_bid-v_ask)/(v_bid+v_ask);\r\n\r\n # print self.bids_volume_list\r\n # print self.asks_volume_list\r\n # print ratio\r\n\r\n if(ratio>threshold or ratio<-threshold):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n def calc_bids_volume(self, lob, m, verbose):\r\n new_b = {}\r\n\r\n for i in range(1, m + 1):\r\n new_b['level' + str(i)] = self.cal_bids_n(lob, i)\r\n\r\n self.bids_volume_list.append(new_b)\r\n\r\n def cal_bids_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n return r_n\r\n\r\n def calc_asks_volume(self, lob, m, verbose):\r\n new_a = {}\r\n\r\n for i in range(1, m + 1):\r\n new_a['level' + str(i)] = self.cal_asks_n(lob, i);\r\n\r\n self.asks_volume_list.append(new_a)\r\n\r\n def cal_asks_n(self, lob, n):\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return q_n\r\n\r\n def calc_level_n_e(self, current_lob, n):\r\n b_n = 0\r\n r_n = 0\r\n a_n = 0\r\n q_n = 0\r\n\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n\r\n if 
(len(current_lob['bids']['lob']) < n):\r\n b_n = 0\r\n r_n = 0\r\n else:\r\n b_n = current_lob['bids']['lob'][n - 1][0]\r\n r_n = current_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['bids']['lob']) < n):\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n else:\r\n b_n_1 = self.last_lob['bids']['lob'][n - 1][0]\r\n r_n_1 = self.last_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(current_lob['asks']['lob']) < n):\r\n a_n = 0\r\n q_n = 0\r\n else:\r\n a_n = current_lob['asks']['lob'][n - 1][0]\r\n q_n = current_lob['asks']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['asks']['lob']) < n):\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n else:\r\n a_n_1 = self.last_lob['asks']['lob'][n - 1][0]\r\n q_n_1 = self.last_lob['asks']['lob'][n - 1][1]\r\n\r\n delta_w = 0;\r\n\r\n if (b_n > b_n_1):\r\n delta_w = r_n\r\n elif (b_n == b_n_1):\r\n delta_w = r_n - r_n_1\r\n else:\r\n delta_w = -r_n_1\r\n\r\n delta_v = 0\r\n if (a_n > a_n_1):\r\n delta_v = -q_n_1\r\n elif (a_n == a_n_1):\r\n delta_v = q_n - q_n_1\r\n else:\r\n delta_v = q_n\r\n\r\n return delta_w - delta_v\r\n\r\n def calc_es(self, lob, m, verbose):\r\n new_e = {}\r\n for i in range(1, m + 1):\r\n new_e['level' + str(i)] = self.calc_level_n_e(lob, i)\r\n\r\n self.es_list.append(new_e)\r\n\r\n def calc_ds(self, lob, m, verbose):\r\n new_d = {}\r\n\r\n for i in range(1, m + 1):\r\n new_d['level' + str(i)] = self.cal_depth_n(lob, i)\r\n\r\n self.ds_list.append(new_d)\r\n\r\n def cal_depth_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return (r_n + q_n) / 2\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n if (self.last_lob == None):\r\n self.last_lob = lob\r\n else:\r\n self.calc_es(lob, self.m, verbose)\r\n self.calc_ds(lob, self.m, verbose)\r\n self.calc_bids_volume(lob, self.m, verbose)\r\n self.calc_asks_volume(lob, self.m, verbose)\r\n self.last_lob 
= lob\r\n\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n\r\n if verbose: print(\"ISHV getorder:\")\r\n\r\n shave_c = 2 # c in the y=mx+c linear mapping from imbalance to shave amount\r\n shave_m = 1 # m in the y=mx+c\r\n\r\n if len(self.orders) < 1:\r\n order = None\r\n else:\r\n if verbose: print(\" self.orders[0]=%s\" % str(self.orders[0]))\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].atype\r\n\r\n otype = self.orders[0].atype\r\n ostyle = self.orders[0].astyle\r\n\r\n microp = lob['microprice']\r\n midp = lob['midprice']\r\n\r\n if otype == 'Bid':\r\n if len(lob['bids']['lob']) > 0:\r\n quoteprice = lob['bids']['bestp']\r\n if quoteprice > self.limit :\r\n quoteprice = self.limit\r\n else:\r\n quoteprice = 1 # KLUDGE -- come back to fix todo\r\n else:\r\n\r\n if len(lob['asks']['lob']) > 0:\r\n quoteprice = lob['asks']['bestp']\r\n if quoteprice < self.limit:\r\n quoteprice = self.limit\r\n else:\r\n quoteprice = 200 # KLUDGE -- come back to fix todo\r\n\r\n\r\n\r\n\r\n def imbalance_alter(quoteprice_aa, lob, countdown, m):\r\n\r\n mlofi_list = [0 for i in range(m)]\r\n cd_list = [0 for i in range(m)]\r\n ad_list = []\r\n n = 1\r\n\r\n while len(self.es_list) >= n:\r\n for i in range(m):\r\n mlofi_list[i] += self.es_list[-n]['level' + str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n n = 1\r\n\r\n while len(self.ds_list) >= n:\r\n for i in range(m):\r\n cd_list[i] += self.ds_list[-n]['level' + str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n for i in range(m):\r\n temp = None\r\n if n == 1:\r\n temp = cd_list[i]+1\r\n else:\r\n temp = cd_list[i]/(n-1)+1\r\n ad_list.append(temp)\r\n\r\n c = 5\r\n decay = 0.8\r\n offset = 0\r\n\r\n for i in range(m):\r\n offset += int(mlofi_list[i]*c*pow(decay,i)/ ad_list[i])\r\n\r\n\r\n benchmark = quoteprice_aa;\r\n if(lob['midprice'] != None):\r\n benchmark = lob['midprice']\r\n # print 'midprice is %d' % benchmark\r\n\r\n quoteprice_iaa = quoteprice_aa + 0.8 * (benchmark + 
offset - quoteprice_aa)\r\n if self.job == 'Bid' and quoteprice_iaa > self.limit:\r\n quoteprice_iaa = self.limit\r\n if self.job == 'Ask' and quoteprice_iaa < self.limit:\r\n quoteprice_iaa = self.limit\r\n\r\n if countdown < 0.3 :\r\n print \"insert\"\r\n if self.job == 'Bid' and (len(lob['asks']['lob']) >= 1) and lob['asks']['lob'][0][0] < self.limit:\r\n quoteprice_iaa = lob['asks']['lob'][0][0]\r\n if self.job == 'Ask' and (len(lob['bids']['lob']) >= 1) and lob['bids']['lob'][0][0] > self.limit:\r\n quoteprice_iaa = lob['bids']['lob'][0][0]\r\n\r\n return quoteprice_iaa\r\n\r\n if(self.is_imbalance_significant(self.m,0.6)):\r\n # print \"abvious\"\r\n quoteprice_iaa = imbalance_alter(quoteprice, lob, countdown, self.m)\r\n\r\n else:\r\n # print \"not abvious\"\r\n quoteprice_iaa = quoteprice\r\n\r\n\r\n\r\n order = Order(self.tid, otype, ostyle, quoteprice_iaa, self.orders[0].qty, time, None, verbose)\r\n self.lastquote = order\r\n return order\r\n\r\n" }, { "alpha_fraction": 0.49242228269577026, "alphanum_fraction": 0.557539165019989, "avg_line_length": 27.044776916503906, "blob_id": "e1232d8972777fe029790529f0a9265c431290c6", "content_id": "869bdc584f75db3752ed4ffe49c4901386bc8e7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7786, "license_type": "permissive", "max_line_length": 122, "num_lines": 268, "path": "/ZhenZhang/source/dataAnalysis/box_analysis.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nfrom pylab import *\r\n# Fixing random state for reproducibility\r\nnp.random.seed(19680801)\r\n\r\ncsv_file = open(\"../Mybalances.csv\",\"r\")\r\ncsv_reader = csv.reader(csv_file);\r\n\r\ny1 = []\r\ny2 = []\r\ny3 = []\r\ny4 = []\r\nname1 = None\r\nname2 = None\r\nname3 = None\r\nname4 = None\r\n\r\ncy1 = 0;\r\ncy2 = 0;\r\ncy3 = 0;\r\ncy4 = 0;\r\ncount = 0\r\nfor item in 
csv_reader:\r\n y1.append(int(float(item[5])))\r\n # y2.append(int(float(item[13])))\r\n # cy1 += int(float(item[5]))\r\n # cy2 += int(float(item[13]))\r\n\r\n\r\n\r\n y3.append(int(float(item[17])))\r\n #y4.append(int(float(item[21])))\r\n\r\n # y3.append(int(float(item[13])))\r\n # y4.append(int(float(item[17])))\r\n # name1 = item[2]\r\n # name2 = item[6]\r\n # name3 = item[10]\r\n # name4 = item[14]\r\n # print '%s,%s'%(item[5],item[9])\r\n count += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\nfig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 8))\r\n\r\n\r\ndata = [y1,y3]\r\nnames = ['AA','IAA']\r\n\r\n\r\nax1.set_title('(a) The daily profits for different agents')\r\ngreen_diamond = dict(markerfacecolor='g', marker='D')\r\ndp1= ax1.boxplot(data,vert=True,whis=0.75,notch=False, labels=names,showmeans=True,meanline=False,meanprops=green_diamond)\r\n\r\n\r\n\r\n\r\nx1,y = dp1['medians'][0].get_xydata()[1]\r\nax1.text(x1+0.075, y, '%.1f' % y,horizontalalignment='left')\r\nx2,y = dp1['medians'][1].get_xydata()[1]\r\nax1.text(x2+0.075, y, '%.1f' % y,horizontalalignment='left')\r\n\r\n# for line in dp1['medians']:\r\n# # get position data for median line\r\n# x, y = line.get_xydata()[1] # top of median line\r\n# # overlay median value\r\n# print x\r\n# print y\r\n# ax1.text(x+0.125, y, '%.1f' % y,horizontalalignment='left')\r\n\r\n\r\nfor index in range(len(dp1['means'])):\r\n y = dp1['means'][index].get_ydata()[0]\r\n if index==0:\r\n ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\n\r\nfor index in range(len(dp1['boxes'])):\r\n y = dp1['boxes'][index].get_ydata()[0]\r\n if index==0:\r\n ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n y = dp1['boxes'][index].get_ydata()[2]\r\n if index==0:\r\n ax1.text(x1 + 0.075, y, '%.1f' % y, 
horizontalalignment='left')\r\n # elif index ==1:\r\n # ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\n# for index in range(len(dp1['boxes'])):\r\n#\r\n# y = dp1['boxes'][index].get_ydata()[0]\r\n#\r\n# if index==0:\r\n# ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n# elif index ==1:\r\n# ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n#\r\n# y = dp1['boxes'][index].get_ydata()[2]\r\n#\r\n# if index==0:\r\n# ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n# elif index ==1:\r\n# ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\nfor index in range(len(dp1['caps'])):\r\n print 'one round'\r\n print index\r\n y = dp1['caps'][index].get_ydata()[0]\r\n print 'bottom'\r\n print y\r\n if index==0:\r\n ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==2:\r\n ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==3:\r\n ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n # y = dp1['caps'][index].get_ydata()[1]\r\n # print 'top'\r\n # print y\r\n # if index==0:\r\n # ax1.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n # elif index ==1:\r\n # ax1.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n# for line in dp1['means']:\r\n# # get position data for median line\r\n# x, y = line.get_ydata()[0] # top of median line\r\n# # overlay median value\r\n# print x\r\n# print y\r\n# ax1.text(x+0.125, y, '%.1f' % y,horizontalalignment='left')\r\n#\r\n# print 'boxes'\r\n#\r\n# for line in dp1['boxes']:\r\n#\r\n# x, y = line.get_xydata()[0] # bottom of left line\r\n# ax1.text(x+0.125,y, '%.1f' % y,horizontalalignment='right') # below\r\n# x, y = line.get_xydata()[2] # bottom of right line\r\n# ax1.text(x+0.125,y, '%.1f' % y,horizontalalignment='right') # below\r\n\r\n\r\n\r\n\r\ndif1 = 
[]\r\nfor i in range(len(y1)):\r\n dif1.append(y3[i]-y1[i])\r\n\r\n\r\nax2.set_title('(b) The profit differences for different agents')\r\ndp2 = ax2.boxplot(dif1,vert=True,whis=0.75,notch=False, showmeans=True,labels=['IAA-AA'],meanprops=green_diamond)\r\n\r\nx1,y = dp2['medians'][0].get_xydata()[1]\r\nax2.text(x1+0.075, y, '%.1f' % y,horizontalalignment='left')\r\n# x2,y = dp1['medians'][1].get_xydata()[1]\r\n# ax1.text(x2+0.075, y, '%.1f' % y,horizontalalignment='left')\r\n\r\n# for line in dp1['medians']:\r\n# # get position data for median line\r\n# x, y = line.get_xydata()[1] # top of median line\r\n# # overlay median value\r\n# print x\r\n# print y\r\n# ax1.text(x+0.125, y, '%.1f' % y,horizontalalignment='left')\r\n\r\n\r\nfor index in range(len(dp2['means'])):\r\n y = dp2['means'][index].get_ydata()[0]\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\n\r\nfor index in range(len(dp2['boxes'])):\r\n y = dp2['boxes'][index].get_ydata()[0]\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n y = dp2['boxes'][index].get_ydata()[2]\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\nfor index in range(len(dp2['boxes'])):\r\n\r\n y = dp2['boxes'][index].get_ydata()[0]\r\n\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n y = dp2['boxes'][index].get_ydata()[2]\r\n\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\nfor 
index in range(len(dp2['caps'])):\r\n print 'one round'\r\n print index\r\n y = dp2['caps'][index].get_ydata()[0]\r\n print 'bottom'\r\n print y\r\n if index==0:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==1:\r\n ax2.text(x1 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==2:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n elif index ==3:\r\n ax2.text(x2 + 0.075, y, '%.1f' % y, horizontalalignment='left')\r\n\r\n\r\n\r\n\r\nplt.savefig(\"./box1.png\")\r\n\r\n\r\n# # fake up some data\r\n# spread = np.random.rand(50) * 100\r\n# center = np.ones(25) * 50\r\n# flier_high = np.random.rand(10) * 100 + 100\r\n# flier_low = np.random.rand(10) * -100\r\n# data = np.concatenate((spread, center, flier_high, flier_low))\r\n#\r\n# fig1, ax1 = plt.subplots()\r\n# ax1.set_title('Basic Plot')\r\n# ax1.boxplot(data,vert=False)\r\n#\r\n#\r\n# spread = np.random.rand(50) * 100\r\n# center = np.ones(25) * 40\r\n# flier_high = np.random.rand(10) * 100 + 100\r\n# flier_low = np.random.rand(10) * -100\r\n# d2 = np.concatenate((spread, center, flier_high, flier_low))\r\n# data.shape = (-1, 1)\r\n# d2.shape = (-1, 1)\r\n#\r\n# data = [data, d2, d2[::2,0]]\r\n# fig7, ax7 = plt.subplots()\r\n# ax7.set_title('Multiple Samples with Different sizes')\r\n# ax7.boxplot(data,vert=False)\r\n#\r\n# plt.show()" }, { "alpha_fraction": 0.45686209201812744, "alphanum_fraction": 0.4680861234664917, "avg_line_length": 38.62035369873047, "blob_id": "77d1570d590b4af34b092c66a14321b86d76f4e9", "content_id": "2d9fffb445b5488a096838ed0937eb6ce17d824e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20759, "license_type": "permissive", "max_line_length": 189, "num_lines": 511, "path": "/ZhenZhang/source/IAA_MLOFI.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\nfrom BSE2_msg_classes import Assignment, Order, 
Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\n\r\nclass Trader_IAA_MLOFI(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time,m):\r\n\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n\r\n self.limit = None\r\n self.job = None\r\n\r\n # learning variables\r\n self.r_shout_change_relative = 0.05\r\n self.r_shout_change_absolute = 0.05\r\n self.short_term_learning_rate = random.uniform(0.1, 0.5)\r\n self.long_term_learning_rate = random.uniform(0.1, 0.5)\r\n self.moving_average_weight_decay = 0.95 # how fast weight decays with time, lower is quicker, 0.9 in vytelingum\r\n self.moving_average_window_size = 5\r\n self.offer_change_rate = 3.0\r\n self.theta = -2.0\r\n self.theta_max = 2.0\r\n self.theta_min = -8.0\r\n self.marketMax = bse_sys_maxprice\r\n\r\n # Variables to describe the market\r\n self.previous_transactions = []\r\n self.moving_average_weights = []\r\n for i in range(self.moving_average_window_size):\r\n self.moving_average_weights.append(self.moving_average_weight_decay ** i)\r\n self.estimated_equilibrium = []\r\n self.smiths_alpha = []\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n # Trading Variables\r\n self.r_shout = None\r\n self.buy_target = None\r\n self.sell_target = None\r\n self.buy_r = -1.0 * (0.3 * random.random())\r\n self.sell_r = -1.0 * (0.3 * random.random())\r\n\r\n # variable for MLOFI\r\n self.last_lob = None;\r\n self.es_list = [];\r\n self.ds_list = [];\r\n\r\n #variable\r\n self.m = m;\r\n\r\n\r\n\r\n def calc_level_n_e(self, current_lob, n):\r\n b_n = 0\r\n r_n = 0\r\n a_n = 0\r\n q_n = 0\r\n\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n\r\n if (len(current_lob['bids']['lob']) < n):\r\n b_n = 0\r\n r_n = 0\r\n else:\r\n b_n 
= current_lob['bids']['lob'][n - 1][0]\r\n r_n = current_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['bids']['lob']) < n):\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n else:\r\n b_n_1 = self.last_lob['bids']['lob'][n - 1][0]\r\n r_n_1 = self.last_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(current_lob['asks']['lob']) < n):\r\n a_n = 0\r\n q_n = 0\r\n else:\r\n a_n = current_lob['asks']['lob'][n - 1][0]\r\n q_n = current_lob['asks']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['asks']['lob']) < n):\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n else:\r\n a_n_1 = self.last_lob['asks']['lob'][n - 1][0]\r\n q_n_1 = self.last_lob['asks']['lob'][n - 1][1]\r\n\r\n delta_w = 0;\r\n\r\n if (b_n > b_n_1):\r\n delta_w = r_n\r\n elif (b_n == b_n_1):\r\n delta_w = r_n - r_n_1\r\n else:\r\n delta_w = -r_n_1\r\n\r\n delta_v = 0\r\n if (a_n > a_n_1):\r\n delta_v = -q_n_1\r\n elif (a_n == a_n_1):\r\n delta_v = q_n - q_n_1\r\n else:\r\n delta_v = q_n\r\n\r\n return delta_w - delta_v\r\n\r\n def calc_es(self, lob, m, verbose):\r\n new_e = {}\r\n for i in range(1, m + 1):\r\n new_e['level' + str(i)] = self.calc_level_n_e(lob, i)\r\n\r\n self.es_list.append(new_e)\r\n\r\n def calc_ds(self, lob, m, verbose):\r\n new_d = {}\r\n\r\n for i in range(1, m + 1):\r\n new_d['level' + str(i)] = self.cal_depth_n(lob, i)\r\n\r\n self.ds_list.append(new_d)\r\n\r\n def cal_depth_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return (r_n + q_n) / 2\r\n\r\n def calcEq(self): ##clear and correct\r\n # Slightly modified from paper, it is unclear inpaper\r\n # N previous transactions * weights / N in vytelingum, swap N denominator for sum of weights to be correct?\r\n if len(self.previous_transactions) == 0:\r\n return\r\n elif len(self.previous_transactions) < self.moving_average_window_size:\r\n # Not enough transactions\r\n 
self.estimated_equilibrium.append(\r\n float(sum(self.previous_transactions)) / max(len(self.previous_transactions), 1))\r\n else:\r\n N_previous_transactions = self.previous_transactions[-self.moving_average_window_size:]\r\n thing = [N_previous_transactions[i] * self.moving_average_weights[i] for i in\r\n range(self.moving_average_window_size)]\r\n eq = sum(thing) / sum(self.moving_average_weights)\r\n self.estimated_equilibrium.append(eq)\r\n\r\n def calcAlpha(self): ##correct. but calcAlpha in snashall's version is incorrect\r\n alpha = 0.0\r\n for p in self.previous_transactions:\r\n alpha += (p - self.estimated_equilibrium[-1]) ** 2\r\n alpha = math.sqrt(alpha / len(self.previous_transactions))\r\n self.smiths_alpha.append(alpha / self.estimated_equilibrium[-1])\r\n\r\n def calcTheta(self): ## clear and correct\r\n gamma = 2.0 # not sensitive apparently so choose to be whatever\r\n # necessary for intialisation, div by 0\r\n if min(self.smiths_alpha) == max(self.smiths_alpha):\r\n alpha_range = 0.4 # starting value i guess\r\n else:\r\n alpha_range = (self.smiths_alpha[-1] - min(self.smiths_alpha)) / (\r\n max(self.smiths_alpha) - min(self.smiths_alpha))\r\n theta_range = self.theta_max - self.theta_min\r\n desired_theta = self.theta_min + (theta_range) * (1 - alpha_range) * math.exp(gamma * (alpha_range - 1))\r\n self.theta = self.theta + self.long_term_learning_rate * (desired_theta - self.theta)\r\n if self.theta > self.theta_max:\r\n self.theta = self.theta_max\r\n if self.theta < self.theta_min:\r\n self.theta = self.theta_min\r\n\r\n def calcRshout(self): ## unclear in Vytelingum's paper\r\n p = self.estimated_equilibrium[-1]\r\n l = self.limit\r\n theta = self.theta\r\n if self.job == 'Bid':\r\n # Currently a buyer\r\n if l <= p: # extramarginal!\r\n self.r_shout = 0.0\r\n else: # intramarginal :(\r\n if self.buy_target > self.estimated_equilibrium[-1]:\r\n # r[0,1]\r\n self.r_shout = math.log(((self.buy_target - p) * (math.exp(theta) - 1) / (l - p)) + 
1) / theta\r\n else:\r\n # r[-1,0]\r\n # print 'buy_target: %f , p: %f , theta: %f' %(self.buy_target,p,theta)\r\n self.r_shout = math.log((1 - (self.buy_target / p)) * (math.exp(theta) - 1) + 1) / theta\r\n # self.r_shout = self.buy_r\r\n\r\n if self.job == 'Ask':\r\n # Currently a seller\r\n if l >= p: # extramarginal!\r\n self.r_shout = 0\r\n else: # intramarginal :(\r\n if self.sell_target > self.estimated_equilibrium[-1]:\r\n # r[-1,0]\r\n self.r_shout = math.log(\r\n (self.sell_target - p) * (math.exp(theta) - 1) / (self.marketMax - p) + 1) / theta\r\n else:\r\n # r[0,1]\r\n a = (self.sell_target - l) / (p - l)\r\n self.r_shout = (math.log((1 - a) * (math.exp(theta) - 1) + 1)) / theta\r\n # self.r_shout = self.sell_r\r\n\r\n def calcAgg(self):\r\n delta = 0\r\n if self.job == 'Bid':\r\n # BUYER\r\n if self.buy_target >= self.previous_transactions[-1]:\r\n # must be more aggressive\r\n delta = (1 + self.r_shout_change_relative) * self.r_shout + self.r_shout_change_absolute\r\n else:\r\n delta = (1 - self.r_shout_change_relative) * self.r_shout - self.r_shout_change_absolute\r\n\r\n self.buy_r = self.buy_r + self.short_term_learning_rate * (delta - self.buy_r)\r\n\r\n if self.job == 'Ask':\r\n # SELLER\r\n if self.sell_target > self.previous_transactions[-1]:\r\n delta = (1 + self.r_shout_change_relative) * self.r_shout + self.r_shout_change_absolute\r\n else:\r\n delta = (1 - self.r_shout_change_relative) * self.r_shout - self.r_shout_change_absolute\r\n\r\n self.sell_r = self.sell_r + self.short_term_learning_rate * (delta - self.sell_r)\r\n\r\n def calcTarget(self):\r\n if len(self.estimated_equilibrium) > 0:\r\n p = self.estimated_equilibrium[-1]\r\n if self.limit == p:\r\n p = p * 1.000001 # to prevent theta_bar = 0\r\n elif self.job == 'Bid':\r\n p = self.limit - self.limit * 0.2 ## Initial guess for eq if no deals yet!!....\r\n elif self.job == 'Ask':\r\n p = self.limit + self.limit * 0.2\r\n l = self.limit\r\n theta = self.theta\r\n if self.job == 
'Bid':\r\n # BUYER\r\n minus_thing = self.buy_r * math.exp(theta * (self.buy_r - 1))\r\n\r\n if l <= p: # Extramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = l\r\n else:\r\n self.buy_target = l * (1 - minus_thing)\r\n else: # intramarginal\r\n if self.buy_r >= 0:\r\n # theta_ba = (p * math.exp(-theta))/(l-p)-1\r\n theta_ba = theta\r\n # print 'theta: %f' %(self.theta)\r\n # print 'theta_ba: %f '%(theta_ba)\r\n # print 'l-p: %f '%(l-p)\r\n # print 'self.buy_r :%f' %(self.buy_r)\r\n\r\n self.buy_target = (l - p) * (1 - (self.buy_r + 1) * math.exp(self.buy_r * theta_ba)) + p\r\n else:\r\n self.buy_target = p * (1 - minus_thing)\r\n if self.buy_target > l:\r\n self.buy_target = l\r\n if self.buy_target < bse_sys_minprice:\r\n self.buy_target = bse_sys_minprice\r\n # print 'buy_target = %f'%(self.buy_target)\r\n\r\n if self.job == 'Ask':\r\n # SELLER\r\n\r\n if l <= p: # Intramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = p + (p - l) * self.sell_r * math.exp((self.sell_r - 1) * theta)\r\n else:\r\n theta_ba = math.log((self.marketMax - p) / (p - l)) - theta\r\n self.buy_target = p + (self.marketMax - p) * self.sell_r * math.exp((self.sell_r + 1) * theta_ba)\r\n else: # Extramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = l\r\n else:\r\n self.buy_target = l + (self.marketMax - l) * self.sell_r * math.exp((self.sell_r - 1) * theta)\r\n if self.sell_target < l:\r\n self.sell_target = l\r\n if self.sell_target > bse_sys_maxprice:\r\n self.sell_target = bse_sys_maxprice\r\n # print 'sell_target = %f'%(self.sell_target)\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n return None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].atype\r\n self.calcTarget()\r\n\r\n if self.prev_best_bid_p == None:\r\n o_bid = 0\r\n else:\r\n o_bid = self.prev_best_bid_p\r\n if self.prev_best_ask_p == None:\r\n o_ask = self.marketMax\r\n else:\r\n o_ask = 
self.prev_best_ask_p\r\n\r\n if self.job == 'Bid': # BUYER\r\n if self.limit <= o_bid:\r\n return None\r\n else:\r\n if len(self.previous_transactions) <= 0: ## has been at least one transaction\r\n o_ask_plus = (1 + self.r_shout_change_relative) * o_ask + self.r_shout_change_absolute\r\n quoteprice = o_bid + ((min(self.limit, o_ask_plus) - o_bid) / self.offer_change_rate)\r\n else:\r\n if o_ask <= self.buy_target:\r\n quoteprice = o_ask\r\n else:\r\n quoteprice = o_bid + ((self.buy_target - o_bid) / self.offer_change_rate)\r\n if self.job == 'Ask':\r\n if self.limit >= o_ask:\r\n return None\r\n else:\r\n if len(self.previous_transactions) <= 0: ## has been at least one transaction\r\n o_bid_minus = (1 - self.r_shout_change_relative) * o_bid - self.r_shout_change_absolute\r\n quoteprice = o_ask - ((o_ask - max(self.limit, o_bid_minus)) / self.offer_change_rate)\r\n else:\r\n if o_bid >= self.sell_target:\r\n quoteprice = o_bid\r\n else:\r\n quoteprice = o_ask - ((o_ask - self.sell_target) / self.offer_change_rate)\r\n\r\n def imbalance_alter(quoteprice_aa, lob, countdown, m):\r\n\r\n mlofi_list = [0 for i in range(m)]\r\n cd_list = [0 for i in range(m)]\r\n ad_list = []\r\n n = 1\r\n\r\n while len(self.es_list) >= n:\r\n for i in range(m):\r\n mlofi_list[i] += self.es_list[-n]['level' + str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n n = 1\r\n\r\n while len(self.ds_list) >= n:\r\n for i in range(m):\r\n cd_list[i] += self.ds_list[-n]['level' + str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n for i in range(m):\r\n temp = None\r\n if n == 1:\r\n temp = cd_list[i]+1\r\n else:\r\n temp = cd_list[i]/(n-1)+1\r\n ad_list.append(temp)\r\n\r\n c = 5\r\n decay = 0.8\r\n offset = 0\r\n\r\n for i in range(m):\r\n offset += int(mlofi_list[i]*c*pow(decay,i)/ ad_list[i])\r\n\r\n\r\n benchmark = quoteprice_aa;\r\n if(lob['midprice'] != None):\r\n benchmark = lob['midprice']\r\n # print 'midprice is %d' % benchmark\r\n\r\n\r\n quoteprice_iaa = quoteprice_aa + 0.8 * 
(benchmark + offset - quoteprice_aa)\r\n if self.job == 'Bid' and quoteprice_iaa > self.limit:\r\n quoteprice_iaa = self.limit\r\n if self.job == 'Ask' and quoteprice_iaa < self.limit:\r\n quoteprice_iaa = self.limit\r\n\r\n\r\n\r\n if countdown < 0.3 :\r\n print \"insert\"\r\n if self.job == 'Bid' and (len(lob['asks']['lob']) >= 1) and lob['asks']['lob'][0][0] < self.limit:\r\n quoteprice_iaa = lob['asks']['lob'][0][0]\r\n if self.job == 'Ask' and (len(lob['bids']['lob']) >= 1) and lob['bids']['lob'][0][0] > self.limit:\r\n quoteprice_iaa = lob['bids']['lob'][0][0]\r\n\r\n if self.job == 'Bid' and quoteprice_iaa < bse_sys_minprice:\r\n quoteprice_iaa = bse_sys_minprice + 1\r\n if self.job == 'Ask' and quoteprice_iaa > bse_sys_maxprice:\r\n quoteprice_iaa = bse_sys_maxprice - 1\r\n\r\n return quoteprice_iaa\r\n\r\n quoteprice_iaa = imbalance_alter(quoteprice, lob, countdown,self.m)\r\n\r\n order = Order(self.tid,\r\n self.orders[0].atype,\r\n 'LIM',\r\n quoteprice_iaa,\r\n self.orders[0].qty,\r\n time, None, -1)\r\n self.lastquote = order\r\n return order\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n ## Begin nicked from ZIP\r\n\r\n # what, if anything, has happened on the bid LOB? 
Nicked from ZIP..\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['bestp']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][0][1]\r\n if self.prev_best_bid_p < lob_best_bid_p:\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or (\r\n (self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # # the bid LOB has been emptied: was it cancelled or hit?\r\n # last_tape_item = lob['tape'][-1]\r\n # if last_tape_item['type'] == 'Cancel' :\r\n # bid_hit = False\r\n # else:\r\n # bid_hit = True\r\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\r\n if trade != None:\r\n # a trade has occurred and the previously nonempty ask LOB is now empty\r\n # so assume best ask was lifted\r\n bid_hit = True\r\n else:\r\n bid_hit = False\r\n # what, if anything, has happened on the ask LOB?\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['bestp']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p:\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or (\r\n (self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n # last_tape_item = 
lob['tape'][-1]\r\n # if last_tape_item['type'] == 'Cancel' :\r\n # ask_lifted = False\r\n # else:\r\n # ask_lifted = True\r\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\r\n if trade != None:\r\n # a trade has occurred and the previously nonempty ask LOB is now empty\r\n # so assume best ask was lifted\r\n ask_lifted = True\r\n else:\r\n ask_lifted = False\r\n\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n ## End nicked from ZIP\r\n if (self.last_lob == None):\r\n self.last_lob = lob\r\n else:\r\n self.calc_es(lob, self.m, verbose)\r\n self.calc_ds(lob, self.m, verbose)\r\n self.last_lob = lob;\r\n\r\n if deal:\r\n self.previous_transactions.append(trade['price'])\r\n if self.sell_target == None:\r\n self.sell_target = trade['price']\r\n if self.buy_target == None:\r\n self.buy_target = trade['price']\r\n\r\n self.calcEq()\r\n self.calcAlpha()\r\n self.calcTheta()\r\n self.calcRshout()\r\n self.calcAgg()\r\n self.calcTarget()\r\n # print 'sell: ', self.sell_target, 'buy: ', self.buy_target, 'limit:', self.limit, 'eq: ', self.estimated_equilibrium[-1], 'sell_r: ', self.sell_r, 'buy_r: ', self.buy_r, '\\n'\r\n" }, { "alpha_fraction": 0.4192499816417694, "alphanum_fraction": 0.425166517496109, "avg_line_length": 52.079925537109375, "blob_id": "7b9244f78ed35fd5cafb057d7e0e9180887f650e", "content_id": "38af9c6a55a0233e17f1c0ec18e3b73dfa29b442", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112904, "license_type": "permissive", "max_line_length": 196, "num_lines": 2127, "path": "/ZhenZhang/source/BSE_trader_agents.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\nfrom BSE2_msg_classes import Assignment, Order, Exch_msg\n\n\n##################--Traders below 
here--#############\nimport random\nimport math\n##################--Traders below here--#############\n\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\n\n# Trader superclass\n# all Traders have a trader id, bank balance, blotter, and list of orders to execute\nclass Trader:\n\n def __init__(self, ttype, tid, balance, time):\n self.ttype = ttype # what type / strategy this trader is\n self.tid = tid # trader unique ID code\n self.balance = balance # money in the bank\n self.blotter = [] # record of trades executed\n self.orders = [] # customer orders currently being worked\n self.max_cust_orders = 1 # maximum number of distinct customer orders allowed at any one time.\n self.quotes = [] # distinct quotes currently live on the LOB\n self.max_quotes = 1 # maximum number of distinct quotes allowed on LOB\n self.willing = 1 # used in ZIP etc\n self.able = 1 # used in ZIP etc\n self.birthtime = time # used when calculating age of a trader/strategy\n self.profitpertime = 0 # profit per unit time\n self.n_trades = 0 # how many trades has this trader done?\n self.lastquote = None # record of what its most recent quote was/is (incl price)\n\n\n def __str__(self):\n blotterstring = ''\n for b in self.blotter :\n blotterstring = blotterstring + '[[%s], %s]' % (str(b[0]), b[1])\n return '[TID=%s type=%s balance=%s blotter=%s orders=%s n_trades=%s profitpertime=%s]' \\\n % (self.tid, self.ttype, self.balance, blotterstring, self.orders, self.n_trades, self.profitpertime)\n\n\n def add_cust_order(self, order, verbose):\n # add a customer order to trader's records\n # currently LAZY: keeps to within max_cust_orders by appending new order and deleting head self.orders\n if len(self.quotes) > 0:\n # this trader has a live quote on the LOB, from a previous customer order\n # need response to signal cancellation/withdrawal of that quote\n response = 'LOB_Cancel'\n else:\n response = 
'Proceed'\n if len(self.orders) >= self.max_cust_orders:\n self.orders = self.orders[1:]\n self.orders.append(order)\n if verbose: print('add_order < response=%s self.orders=%s' % (response, str(self.orders)))\n return response\n\n\n # delete a customer order from trader's list of orders being worked\n def del_cust_order(self, cust_order_id, verbose):\n if verbose:\n print('>del_cust_order: Cust_orderID=%s; self.orders=' % cust_order_id)\n for o in self.orders: print('%s ' % str(o))\n\n cust_orders = []\n for co in self.orders:\n if co.assignmentid != cust_order_id: cust_orders.append(co)\n\n self.orders = cust_orders\n\n\n # revise a customer order: used after a PARTial fill on the exchange\n def revise_cust_order(self, cust_order_id, revised_order, verbose):\n if verbose:\n print('>revise_cust_order: Cust_orderID=%s; revised_order=%s, self.orders=' % (cust_order_id, revised_order))\n for o in self.orders: print('%s ' % str(o))\n\n cust_orders = []\n for co in self.orders:\n if co.assignmentid != cust_order_id: cust_orders.append(co)\n else:\n revised_assignment = co\n revised_assignment.qty = revised_order.qty\n cust_orders.append(revised_assignment)\n\n self.orders = cust_orders\n\n if verbose:\n print('<revise_cust_order: Cust_orderID=%s; revised_order=%s, self.orders=' % (cust_order_id, revised_order))\n for o in self.orders: print('%s ' % str(o))\n\n\n # delete an order/quote from the trader's list of its orders live on the exchange\n def del_exch_order(self, oid, verbose):\n if verbose:\n print('>del_exch_order: OID:%d; self.quotes=' % oid)\n for q in self.quotes: print('%s ' % str(q))\n\n exch_orders = []\n for eo in self.quotes:\n if eo.orderid != oid: exch_orders.append(eo)\n\n self.quotes= exch_orders\n\n\n def bookkeep(self, msg, time, verbose):\n # bookkeep(): trader book-keeping in response to message from the exchange\n # update records of what orders are still being worked, account balance, etc.\n # trader's blotter is a simple sequential record of 
each exchange messages received, and the trader's balance after bookeeping that msh\n\n if verbose: print('>bookkeep msg=%s bal=%d' % (msg, self.balance))\n\n profit = 0\n\n if msg.event == \"CAN\":\n # order was cancelled at the exchange\n # so delete the order from the trader's records of what quotes it has live on the exchange\n if verbose:\n print(\">CANcellation: msg=%s quotes=\" % str(msg))\n for q in self.quotes: print(\"%s\" % str(q))\n\n newquotes = []\n for q in self.quotes:\n if q.orderid != msg.oid:\n newquotes.append(q)\n self.quotes = newquotes\n\n if verbose:\n print(\"<CANcellation: quotes=\")\n for q in self.quotes: print(\"%s\" % str(q))\n\n\n # an individual order of some types (e.g. MKT) can fill via transactions at different prices\n # so the message that comes back from the exchange has transaction data in a list: will often be length=1\n\n if msg.event == \"FILL\" or msg.event == \"PART\":\n\n for trans in msg.trns:\n transactionprice = trans[\"Price\"]\n qty = trans[\"Qty\"]\n\n # find this LOB order in the trader's list of quotes sent to exchange\n exch_order = None\n for ord in self.quotes:\n if ord.orderid == msg.oid:\n exch_order = ord\n break\n if exch_order == None:\n s = 'FAIL: bookkeep() cant find order (msg.oid=%d) orders=' % msg.oid\n for ord in self.quotes: s = s + str(ord)\n sys.exit(s)\n\n cust_order_id = exch_order.myref\n cust_order = None\n for assignment in self.orders:\n if assignment.assignmentid == cust_order_id:\n cust_order = assignment\n break\n\n limitprice = cust_order.price\n\n if exch_order.otype == 'Bid':\n profit = (limitprice - transactionprice) * qty\n else:\n profit = (transactionprice - limitprice) * qty\n\n self.balance += profit\n self.n_trades += 1\n age = time - self.birthtime\n self.profitpertime = self.balance / age\n\n\n\n\n\n\n if verbose: print('Price=%d Limit=%d Q=%d Profit=%d N_trades=%d Age=%f Balance=%d' %\n (transactionprice, limitprice, qty, profit, self.n_trades, age, self.balance))\n\n if 
profit < 0 :\n print self.tid\n print self.ttype\n print profit\n print exch_order\n sys.exit('Exit: Negative profit')\n\n if verbose: print('%s: profit=%d bal=%d profit/time=%f' %\n (self.tid, profit, self.balance, self.profitpertime))\n\n # by the time we get to here, exch_order is instantiated\n cust_order_id = exch_order.myref\n\n if msg.event == \"FILL\":\n # this order has completed in full, so it thereby completes the corresponding customer order\n # so delete both the customer order from trader's record of those\n # and the order has already been deleted from the exchange's records, so also needs to be deleted from trader's records of orders held at exchange\n cust_order_id = exch_order.myref\n if verbose: print('>bookkeep() deleting customer order ID=%s' % cust_order_id)\n self.del_cust_order(cust_order_id, verbose) # delete this customer order\n if verbose: print(\">bookkeep() deleting OID:%d from trader's exchange-order records\" % exch_order.orderid)\n self.del_exch_order(exch_order.orderid, verbose) # delete the exchange-order from trader's records\n\n elif msg.event == \"PART\":\n # the customer order is still live, but its quantity needs updating\n if verbose: print('>bookkeep() PART-filled order updating qty on customer order ID=%s' % cust_order_id)\n self.revise_cust_order(cust_order_id, msg.revo, verbose) # delete this customer order\n\n if exch_order.ostyle == \"IOC\":\n # a partially filled IOC has the non-filled portion cancelled at the exchange,\n # so the trader's order records need to be updated accordingly\n if verbose: print(\">bookkeep() PART-filled IOC cancels remainder: deleting OID:%d from trader's exchange-order records\" % exch_order.orderid)\n self.del_exch_order(exch_order.orderid, verbose) # delete the exchange-order from trader's records\n\n self.blotter.append([msg, self.balance]) # add trade record to trader's blotter\n\n\n\n # specify how trader responds to events in the market\n # this is a null action, expect it to be 
overloaded by specific algos\n def respond(self, time, lob, trade, verbose):\n return None\n\n\n # specify how trader mutates its parameter values\n # this is a null action, expect it to be overloaded by specific algos\n def mutate(self, time, lob, trade, verbose):\n return None\n\n\n# Trader subclass Giveaway\n# even dumber than a ZI-U: just give the deal away\n# (but never makes a loss)\nclass Trader_Giveaway(Trader):\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print('GVWY getorder:')\n\n if len(self.orders) < 1:\n order = None\n else:\n quoteprice = self.orders[0].price\n order = Order(self.tid,\n self.orders[0].atype,\n self.orders[0].astyle,\n quoteprice,\n self.orders[0].qty,\n time, None, -1)\n self.lastquote=order\n return order\n\n\n\n# Trader subclass ZI-C\n# After Gode & Sunder 1993\nclass Trader_ZIC(Trader):\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print('ZIC getorder:')\n\n if len(self.orders) < 1:\n # no orders: return NULL\n order = None\n else:\n minprice = lob['bids']['worstp']\n maxprice = lob['asks']['worstp']\n\n limit = self.orders[0].price\n otype = self.orders[0].atype\n ostyle = self.orders[0].astyle\n if otype == 'Bid':\n oprice = random.randint(minprice, limit)\n else:\n oprice = random.randint(limit, maxprice)\n # NB should check it == 'Ask' and barf if not\n order = Order(self.tid, otype, ostyle, oprice, self.orders[0].qty, time, None, -1)\n self.lastquote = order\n return order\n\n\n\n# Trader subclass Shaver\n# shaves a penny off the best price\nclass Trader_Shaver(Trader):\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print(\"SHVR getorder:\")\n\n if len(self.orders) < 1:\n order = None\n else:\n if verbose: print(\" self.orders[0]=%s\" % str(self.orders[0]))\n limitprice = self.orders[0].price\n otype = self.orders[0].atype\n ostyle = self.orders[0].astyle\n if otype == 'Bid':\n if lob['bids']['n'] > 0:\n quoteprice = lob['bids']['bestp'] + 1\n if 
quoteprice > limitprice :\n quoteprice = limitprice\n else:\n quoteprice = lob['bids']['worstp']\n else:\n if lob['asks']['n'] > 0:\n quoteprice = lob['asks']['bestp'] - 1\n if quoteprice < limitprice:\n quoteprice = limitprice\n else:\n quoteprice = lob['asks']['worstp']\n order = Order(self.tid, otype, ostyle, quoteprice, self.orders[0].qty, time, None, -1)\n self.lastquote = order\n return order\n\n\n\n# Trader subclass Imbalance-sensitive Shaver\n# shaves X off the best price, where X depends on supply/demand imbalance\nclass Trader_ISHV(Trader):\n\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print(\"ISHV getorder:\")\n\n shave_c = 2 # c in the y=mx+c linear mapping from imbalance to shave amount\n shave_m = 1 # m in the y=mx+c\n\n if len(self.orders) < 1:\n order = None\n else:\n if verbose: print(\" self.orders[0]=%s\" % str(self.orders[0]))\n limitprice = self.orders[0].price\n otype = self.orders[0].atype\n ostyle = self.orders[0].astyle\n\n microp = lob['microprice']\n midp = lob['midprice']\n\n if microp != None and midp != None:\n imbalance = microp - midp\n else: imbalance = 0 # if imbalance is undefined, proceed as if it is equal to zero\n\n\n if otype == 'Bid':\n\n # quantity sensitivity\n if imbalance < 0 : shaving = 1 # imbalance in favour of buyers, so shave slowly\n else: shaving = shave_c + (shave_m * int(imbalance*100)/100) # shave ever larger amounts\n\n # print('t:%f, ISHV (Bid) imbalance=%s shaving=%s' % (time, imbalance, shaving))\n\n if len(lob['bids']['lob']) > 0:\n quoteprice = lob['bids']['bestp'] + shaving\n if quoteprice > limitprice :\n quoteprice = limitprice\n else:\n quoteprice = 1 #KLUDGE -- come back to fix todo\n else:\n # quantity sensitivity\n if imbalance > 0 : shaving = 1\n else: shaving = shave_c - (shave_m * int(imbalance*100)/100)\n\n # print('t:%f, ISHV (Ask) imbalance=%s shaving=%s' % (time, imbalance, shaving))\n\n if len(lob['asks']['lob']) > 0:\n quoteprice = lob['asks']['bestp'] - shaving\n 
if quoteprice < limitprice :\n quoteprice = limitprice\n else:\n quoteprice = 200 #KLUDGE -- come back to fix todo\n\n order = Order(self.tid, otype, ostyle, quoteprice, self.orders[0].qty, time, None, verbose)\n self.lastquote = order\n return order\n\n\n\n# Trader subclass Sniper\n# Based on Shaver, inspired by Kaplan\n# \"lurks\" until time remaining < threshold% of the trading session\n# then gets increasing aggressive, increasing \"shave thickness\" as time runs out\nclass Trader_Sniper(Trader):\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print('SNPR getorder: self.orders[0]=%s' % str(self.orders[0]))\n\n lurk_threshold = 0.2\n shavegrowthrate = 3\n shave = int(1.0 / (0.01 + countdown / (shavegrowthrate * lurk_threshold)))\n if (len(self.orders) < 1) or (countdown > lurk_threshold):\n order = None\n else:\n limitprice = self.orders[0].price\n otype = self.orders[0].otype\n ostyle = self.orders[0].ostyle\n if otype == 'Bid':\n if lob['bids']['n'] > 0:\n oprice = lob['bids']['bestp'] + shave\n if oprice > limitprice:\n oprice = limitprice\n else:\n oprice = lob['bids']['worstp']\n else:\n if lob['asks']['n'] > 0:\n oprice = lob['asks']['bestp'] - shave\n if oprice < limitprice:\n oprice = limitprice\n else:\n oprice = lob['asks']['worstp']\n order = Order(self.tid, otype, ostyle, oprice, self.orders[0].qty, time, None, -1)\n self.lastquote = order\n return order\n\n\n\n# Trader subclass ZIP\n# After Cliff 1997\nclass Trader_ZIP(Trader):\n\n # ZIP init key param-values are those used in Cliff's 1997 original HP Labs tech report\n # NB this implementation keeps separate margin values for buying & selling,\n # so a single trader can both buy AND sell\n # -- in the original, traders were either buyers OR sellers\n\n def __init__(self, ttype, tid, balance, time):\n Trader.__init__(self, ttype, tid, balance, time)\n m_fix = 0.05\n m_var = 0.05\n self.job = None # this is 'Bid' or 'Ask' depending on customer order\n self.active = False # gets 
switched to True while actively working an order\n self.prev_change = 0 # this was called last_d in Cliff'97\n self.beta = 0.1 + 0.2 * random.random() # learning rate\n self.momntm = 0.3 * random.random() # momentum\n self.ca = 0.10 # self.ca & .cr were hard-coded in '97 but parameterised later\n self.cr = 0.10\n self.margin = None # this was called profit in Cliff'97\n self.margin_buy = -1.0 * (m_fix + m_var * random.random())\n self.margin_sell = m_fix + m_var * random.random()\n self.price = None\n self.limit = None\n # memory of best price & quantity of best bid and ask, on LOB on previous update\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n # memory of worst prices from customer orders received so far\n self.worst_bidprice = None\n self.worst_askprice = None\n\n\n def __str__(self):\n s = '%s, job=, %s, ' % (self.tid, self.job)\n if self.active == True: s = s +'actv=,T, '\n else: s = s + 'actv=,F, '\n if self.margin == None: s = s + 'mrgn=,N, '\n else: s = s + 'mrgn=,%5.2f, ' % self.margin\n s = s + 'lmt=,%s, price=,%s, bestbid=,%s,@,%s, bestask=,%s,@,%s, wrstbid=,%s, wrstask=,%s' %\\\n (self.limit, self.price, self.prev_best_bid_q, self.prev_best_bid_p, self.prev_best_ask_q, self.prev_best_ask_p, self.worst_bidprice, self.worst_askprice)\n return(s)\n\n\n def getorder(self, time, countdown, lob, verbose):\n\n if verbose: print('ZIP getorder(): LOB=%s' % lob)\n\n # random coefficient, multiplier on trader's own estimate of worst possible bid/ask prices\n # currently in arbitrarily chosen range [2, 5]\n worst_coeff = 2 + (3 * random.random())\n\n if len(self.orders) < 1:\n self.active = False\n order = None\n else:\n self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].atype\n if self.job == 'Bid':\n # currently a buyer (working a bid order)\n self.margin = self.margin_buy\n # what is the worst bid price on the LOB right now?\n if len(lob['bids']['lob']) > 
0 :\n # take price of final entry on LOB\n worst_bid = lob['bids']['lob'][-1][0]\n else:\n # local pessimistic estimate of the worst bid price (own version of stub quote)\n worst_bid = max(1, int(self.limit / worst_coeff))\n if self.worst_bidprice == None: self.worst_bidprice = worst_bid\n elif self.worst_bidprice > worst_bid: self.worst_bidprice = worst_bid\n else:\n # currently a seller (working a sell order)\n self.margin = self.margin_sell\n # what is the worst ask price on the LOB right now?\n if len(lob['asks']['lob']) > 0 :\n # take price of final entry on LOB\n worst_ask = lob['asks']['lob'][-1][0]\n else:\n # local pessimistic estimate of the worst ask price (own version of stub quote)\n worst_ask = int(self.limit * worst_coeff)\n if self.worst_askprice == None: self.worst_askprice = worst_ask\n elif self.worst_askprice < worst_ask: self.worst_askprice = worst_ask\n\n quoteprice = int(self.limit * (1 + self.margin))\n self.price = quoteprice\n\n order = Order(self.tid, self.job, \"LIM\", quoteprice, self.orders[0].qty, time, None, -1)\n self.lastquote = order\n\n return order\n\n\n # update margin on basis of what happened in market\n def respond(self, time, lob, trade, verbose):\n # ZIP trader responds to market events, altering its margin\n # does this whether it currently has an order to work or not\n\n def target_up(price):\n # generate a higher target price by randomly perturbing given price\n ptrb_abs = self.ca * random.random() # absolute shift\n ptrb_rel = price * (1.0 + (self.cr * random.random())) # relative shift\n target = int(round(ptrb_rel + ptrb_abs, 0))\n if target == price: target = price + 1 # enforce minimal difference\n # print('TargetUp: %d %d\\n' % (price, target))\n return(target)\n\n\n def target_down(price):\n # generate a lower target price by randomly perturbing given price\n ptrb_abs = self.ca * random.random() # absolute shift\n ptrb_rel = price * (1.0 - (self.cr * random.random())) # relative shift\n target = 
int(round(ptrb_rel - ptrb_abs, 0))\n if target == price : target = price -1 # enforce minimal difference\n # print('TargetDn: %d %d\\n' % (price,target))\n return(target)\n\n\n def microshade(microprice, price):\n # shade in the direction of the microprice\n microweight = 0\n if microprice != None: shaded = ((microweight * microprice) + ((1 - microweight) * price))\n else: shaded = price\n # print('Microshade: micro=%s price=%s shaded=%s' % (microprice, price, shaded))\n return(shaded)\n\n\n def willing_to_trade(price):\n # am I willing to trade at this price?\n willing = False\n if self.job == 'Bid' and self.active and self.price >= price:\n willing = True\n if self.job == 'Ask' and self.active and self.price <= price:\n willing = True\n return willing\n\n\n def profit_alter(*argv):\n # this has variable number of parameters\n # if passed a single numeric value, that's the target price\n # if passed three numeric values, that's the price, beta (learning rate), and momentum\n if len(argv) == 1 :\n price = argv[0]\n beta = self.beta\n momntm = self.momntm\n elif len(argv) == 3 :\n price = argv[0]\n beta = argv[1]\n momntm = argv[2]\n else:\n sys.stdout.flush()\n sys.exit('Fail: ZIP profit_alter given wrong number of parameters')\n\n # print('profit_alter: price=%s beta=%s momntm=%s' % (price, beta, momntm))\n oldprice = self.price\n diff = price - oldprice\n change = ((1.0 - self.momntm) * (self.beta * diff)) + (self.momntm * self.prev_change)\n self.prev_change = change\n newmargin = ((self.price + change) / self.limit) - 1.0\n\n if self.job == 'Bid':\n margin = min(newmargin, 0)\n self.margin_buy = margin\n self.margin = margin\n else :\n margin = max(0, newmargin)\n self.margin_sell = margin\n self.margin = margin\n\n # set the price from limit and profit-margin\n self.price = int(round(self.limit * (1.0 + self.margin), 0))\n # print('old=%d diff=%d change=%d lim=%d price = %d\\n' % (oldprice, diff, change, self.limit, self.price))\n\n\n if verbose and trade != 
None: print('respond() [ZIP] time=%s tid=%s, trade=%s LOB[bids]=%s LOB[asks]=%s' %\n (time, self.tid, trade, lob[\"bids\"], lob[\"asks\"]))\n\n\n # what, if anything, has happened on the bid LOB?\n\n # if trade != None: print('ZIP respond() trade=%s' % trade)\n\n bid_improved = False\n bid_hit = False\n\n if len(lob['bids']['lob']) > 0: lob_best_bid_p = lob['bids']['lob'][0][0]\n else: lob_best_bid_p = None\n\n lob_best_bid_q = None # default assumption\n\n if lob_best_bid_p != None:\n # non-empty bid LOB\n\n if self.prev_best_bid_p > lob_best_bid_p : best_bid_p_decreased = True\n else: best_bid_p_decreased = False\n\n if (self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q): same_p_smaller_q = True\n else: same_p_smaller_q = False\n\n lob_best_bid_q = lob['bids']['lob'][0][1]\n\n if self.prev_best_bid_p < lob_best_bid_p :\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade != None and (best_bid_p_decreased or same_p_smaller_q) :\n # there WAS a trade and either...\n # ... 
(best bid price has gone DOWN) or (best bid price is same but quantity at that price has gone DOWN)\n # then assume previous best bid was hit\n bid_hit = True\n\n elif self.prev_best_bid_p != None:\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n bid_hit = True\n else:\n bid_hit = False\n\n # if verbose: print(\"LOB[bids]=%s bid_improved=%s bid_hit=%s\" % (lob['bids'], bid_improved, bid_hit))\n\n\n # what, if anything, has happened on the ask LOB?\n\n ask_improved = False\n ask_lifted = False\n\n if len(lob['asks']['lob']) > 0: lob_best_ask_p = lob['asks']['lob'][0][0]\n else: lob_best_ask_p = None\n\n lob_best_ask_q = None\n\n if lob_best_ask_p != None:\n # non-empty ask LOB\n\n if self.prev_best_ask_p < lob_best_ask_p: best_ask_p_increased = True\n else: best_ask_p_increased = False\n\n if (self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q): same_p_smaller_q = True\n else: same_p_smaller_q = False\n\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if self.prev_best_ask_p > lob_best_ask_p :\n # best ask has improved -- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade != None and (best_ask_p_increased or same_p_smaller_q):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\n ask_lifted = True\n\n elif self.prev_best_ask_p != None:\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n ask_lifted = True\n else:\n ask_lifted = False\n\n\n # if verbose: print(\"LOB[asks]=%s ask_improved=%s ask_lifted=%s\" % (lob['asks'], ask_improved, ask_lifted))\n\n\n if verbose and (bid_improved or bid_hit or ask_improved or 
ask_lifted):\n print('ZIP respond() B_improved=%s; B_hit=%s A_improved=%s, A_lifted=%s' % (bid_improved, bid_hit, ask_improved, ask_lifted))\n print('Trade=%s\\n' % trade)\n\n\n # we want to know: did a deal just happen?\n # if not, did the most recent bid\n\n\n deal = bid_hit or ask_lifted\n\n\n # previously...\n # when raising margin, tradeprice = trade['price'], targetprice = f(tradeprice) &\n # i.e. target price will be calculated relative to price of most recent transaction\n # and when lowering margin, targetprice = f(best_price_on_counterparty_side_of_LOB) or\n # or if LOB empty then targetprice = f(worst possible counterparty quote) <-- a system constant\n\n\n # new in this version:\n # take account of LOB's microprice if it is defined (if not, use trade['price'] as before)\n\n midp = lob['midprice']\n microp = lob['microprice']\n\n # KLUDGE for TESTING\n if time > 79: microp = 145\n\n if microp != None and midp != None :\n imbalance = microp - midp\n else:\n imbalance = 0 # uses zero instead of None because a zero imbalance reverts ZIP to original form\n\n\n target_price = None # default assumption\n\n # print('self.job=%s' % self.job)\n\n if self.job == 'Ask':\n # seller\n if deal:\n if verbose: print ('trade',trade)\n tradeprice = trade['price'] # price of most recent transaction\n # print('tradeprice=%s lob[microprice]=%s' % (tradeprice, lob['microprice']))\n shadetrade = microshade(lob['microprice'], tradeprice)\n refprice = shadetrade\n\n if self.price <= tradeprice:\n # could sell for more? 
raise margin\n target_price = target_up(refprice)\n profit_alter(target_price)\n elif ask_lifted and self.active and not willing_to_trade(tradeprice):\n # previous best ask was hit,\n # but this trader wouldn't have got the deal cos price to high,\n # and still working a customer order, so reduce margin\n target_price = target_down(refprice)\n profit_alter(target_price)\n else:\n # no deal: aim for a target price higher than best bid\n # print('lob_best_bid_p=%s lob[microprice]=%s' % (lob_best_bid_p, lob['microprice']))\n refprice = microshade(lob['microprice'], lob_best_bid_p)\n\n if ask_improved and self.price > lob_best_bid_p:\n if lob_best_bid_p != None:\n target_price = target_up(lob_best_bid_p)\n else:\n if self.worst_askprice != None:\n target_price = self.worst_askprice\n # print('worst_askprice = %s' % self.worst_askprice)\n target_price = None #todo: does this stop the price-spikes?\n else: target_price = None\n # target_price = lob['asks']['worstp'] # stub quote\n if target_price != None:\n # print('PA1: tp=%s' % target_price)\n profit_alter(target_price)\n\n if self.job == 'Bid':\n # buyer\n if deal:\n tradeprice = trade['price']\n shadetrade = microshade(lob['microprice'], tradeprice)\n refprice = shadetrade\n\n if lob['microprice'] != None and lob['midprice'] != None:\n delta = lob['microprice'] - lob['midprice']\n # refprice = refprice + delta\n\n if self.price >= tradeprice :\n # could buy for less? raise margin (i.e. 
cut the price)\n target_price = target_down(refprice)\n profit_alter(target_price)\n elif bid_hit and self.active and not willing_to_trade(tradeprice):\n # wouldn't have got this deal, and still working a customer order,\n # so reduce margin\n target_price = target_up(refprice)\n profit_alter(target_price)\n else:\n # no deal: aim for target price lower than best ask\n refprice = microshade(lob['microprice'], lob_best_ask_p)\n if bid_improved and self.price < lob_best_ask_p:\n if lob_best_ask_p != None:\n target_price = target_down(lob_best_ask_p)\n else:\n if self.worst_bidprice != None :\n target_price = self.worst_bidprice\n target_price = None\n else: target_price = None\n # target_price = lob['bids']['worstp'] # stub quote\n if target_price != None:\n # print('PA2: tp=%s' % target_price)\n profit_alter(target_price)\n\n # print('time,%f,>>>,microprice,%s,>>>,target_price,%s' % (time, lob['microprice'], target_price))\n\n # remember the best LOB data ready for next response\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_bid_q = lob_best_bid_q\n self.prev_best_ask_p = lob_best_ask_p\n self.prev_best_ask_q = lob_best_ask_q\n\n\n\n##########################---trader-types have all been defined now--################\n\n\nclass Trader_AA(Trader):\n\n def __init__(self, ttype, tid, balance, time):\n # Stuff about trader\n # self.ttype = ttype\n # self.tid = tid\n # self.balance = balance\n # self.birthtime = time\n # self.profitpertime = 0\n # self.n_trades = 0\n # self.blotter = []\n # self.orders = []\n # self.n_quotes = 0\n # self.lastquote = None\n Trader.__init__(self, ttype, tid, balance, time)\n\n self.limit = None\n self.job = None\n\n # learning variables\n self.r_shout_change_relative = 0.05\n self.r_shout_change_absolute = 0.05\n self.short_term_learning_rate = random.uniform(0.1, 0.5)\n self.long_term_learning_rate = random.uniform(0.1, 0.5)\n self.moving_average_weight_decay = 0.95 # how fast weight decays with time, lower is quicker, 0.9 in 
vytelingum\n self.moving_average_window_size = 5\n self.offer_change_rate = 3.0\n self.theta = -2.0\n self.theta_max = 2.0\n self.theta_min = -8.0\n self.marketMax = bse_sys_maxprice\n\n # Variables to describe the market\n self.previous_transactions = []\n self.moving_average_weights = []\n for i in range(self.moving_average_window_size):\n self.moving_average_weights.append(self.moving_average_weight_decay**i)\n self.estimated_equilibrium = []\n self.smiths_alpha = []\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n\n # Trading Variables\n self.r_shout = None\n self.buy_target = None\n self.sell_target = None\n self.buy_r = -1.0 * (0.3 * random.random())\n self.sell_r = -1.0 * (0.3 * random.random())\n\n\n\n def calcEq(self): ##clear and correct\n # Slightly modified from paper, it is unclear inpaper\n # N previous transactions * weights / N in vytelingum, swap N denominator for sum of weights to be correct?\n if len(self.previous_transactions) == 0:\n return\n elif len(self.previous_transactions) < self.moving_average_window_size:\n # Not enough transactions\n self.estimated_equilibrium.append(float(sum(self.previous_transactions)) / max(len(self.previous_transactions), 1))\n else:\n N_previous_transactions = self.previous_transactions[-self.moving_average_window_size:]\n thing = [N_previous_transactions[i]*self.moving_average_weights[i] for i in range(self.moving_average_window_size)]\n eq = sum( thing ) / sum(self.moving_average_weights)\n self.estimated_equilibrium.append(eq)\n\n def calcAlpha(self): ##correct. 
but calcAlpha in snashall's version is incorrect\n alpha = 0.0\n for p in self.previous_transactions:\n alpha += (p - self.estimated_equilibrium[-1])**2\n alpha = math.sqrt(alpha/len(self.previous_transactions))\n self.smiths_alpha.append( alpha/self.estimated_equilibrium[-1] )\n\n def calcTheta(self): ## clear and correct\n gamma = 2.0 #not sensitive apparently so choose to be whatever\n # necessary for intialisation, div by 0\n if min(self.smiths_alpha) == max(self.smiths_alpha):\n alpha_range = 0.4 #starting value i guess\n else:\n alpha_range = (self.smiths_alpha[-1] - min(self.smiths_alpha)) / (max(self.smiths_alpha) - min(self.smiths_alpha))\n theta_range = self.theta_max - self.theta_min\n desired_theta = self.theta_min + (theta_range) * (1 - alpha_range) * math.exp(gamma * (alpha_range - 1))\n self.theta = self.theta + self.long_term_learning_rate * (desired_theta - self.theta)\n if self.theta > self.theta_max :\n self.theta = self.theta_max\n if self.theta < self.theta_min :\n self.theta = self.theta_min\n\n def calcRshout(self): ## unclear in Vytelingum's paper\n p = self.estimated_equilibrium[-1]\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n # Currently a buyer\n if l <= p: #extramarginal!\n self.r_shout = 0.0\n else: #intramarginal :(\n if self.buy_target > self.estimated_equilibrium[-1]:\n #r[0,1]\n self.r_shout = math.log(((self.buy_target - p) * (math.exp(theta) - 1) / (l - p)) + 1) / theta\n else:\n #r[-1,0]\n # print 'buy_target: %f , p: %f , theta: %f' %(self.buy_target,p,theta)\n self.r_shout = math.log((1 - (self.buy_target/p)) * (math.exp(theta) - 1) + 1) / theta\n # self.r_shout = self.buy_r\n\n\n if self.job == 'Ask':\n # Currently a seller\n if l >= p: #extramarginal!\n self.r_shout = 0\n else: #intramarginal :(\n if self.sell_target > self.estimated_equilibrium[-1]:\n # r[-1,0]\n self.r_shout = math.log((self.sell_target - p) * (math.exp(theta) - 1) / (self.marketMax - p) + 1) / theta\n else:\n # r[0,1]\n a = 
(self.sell_target-l)/(p-l)\n self.r_shout = (math.log((1 - a) * (math.exp(theta) - 1) + 1)) / theta\n # self.r_shout = self.sell_r\n\n def calcAgg(self):\n delta = 0\n if self.job == 'Bid':\n # BUYER\n if self.buy_target >= self.previous_transactions[-1] :\n # must be more aggressive\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.buy_r = self.buy_r + self.short_term_learning_rate * (delta - self.buy_r)\n\n if self.job == 'Ask':\n # SELLER\n if self.sell_target > self.previous_transactions[-1] :\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.sell_r = self.sell_r + self.short_term_learning_rate * (delta - self.sell_r)\n\n def calcTarget(self):\n if len(self.estimated_equilibrium) > 0:\n p = self.estimated_equilibrium[-1]\n if self.limit == p:\n p = p * 1.000001 # to prevent theta_bar = 0\n elif self.job == 'Bid':\n p = self.limit - self.limit * 0.2 ## Initial guess for eq if no deals yet!!....\n elif self.job == 'Ask':\n p = self.limit + self.limit * 0.2\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n #BUYER\n minus_thing = self.buy_r * math.exp(theta*(self.buy_r-1))\n\n if l <= p: #Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l * (1 - minus_thing)\n else: #intramarginal\n if self.buy_r >= 0:\n # theta_ba = (p * math.exp(-theta))/(l-p)-1\n theta_ba = theta\n # print 'theta: %f' %(self.theta)\n # print 'theta_ba: %f '%(theta_ba)\n # print 'l-p: %f '%(l-p)\n # print 'self.buy_r :%f' %(self.buy_r)\n\n self.buy_target = (l-p)*(1-(self.buy_r+1)*math.exp(self.buy_r*theta_ba))+p\n else:\n self.buy_target = p*(1-minus_thing)\n if self.buy_target > l:\n self.buy_target = l\n if self.buy_target <bse_sys_minprice :\n self.buy_target = 
bse_sys_minprice\n # print 'buy_target = %f'%(self.buy_target)\n\n if self.job == 'Ask':\n #SELLER\n\n if l <= p: #Intramarginal\n if self.buy_r >= 0:\n self.buy_target = p + (p-l)* self.sell_r*math.exp((self.sell_r-1)*theta)\n else:\n theta_ba = math.log((self.marketMax-p)/(p-l))-theta\n self.buy_target = p + (self.marketMax-p)* self.sell_r*math.exp((self.sell_r+1)*theta_ba)\n else: # Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l + (self.marketMax-l)*self.sell_r*math.exp((self.sell_r-1)*theta)\n if self.sell_target < l:\n self.sell_target = l\n if self.sell_target > bse_sys_maxprice:\n self.sell_target = bse_sys_maxprice\n # print 'sell_target = %f'%(self.sell_target)\n\n def getorder(self, time, countdown, lob,verbose):\n if len(self.orders) < 1:\n self.active = False\n return None\n else:\n self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].atype\n self.calcTarget()\n\n if self.prev_best_bid_p == None:\n o_bid = 0\n else:\n o_bid = self.prev_best_bid_p\n if self.prev_best_ask_p == None:\n o_ask = self.marketMax\n else:\n o_ask = self.prev_best_ask_p\n\n if self.job == 'Bid': #BUYER\n if self.limit <= o_bid:\n return None\n else:\n if len(self.previous_transactions) <= 0: ## has been at least one transaction\n o_ask_plus = (1+self.r_shout_change_relative)*o_ask + self.r_shout_change_absolute\n quoteprice = o_bid + ((min(self.limit, o_ask_plus) - o_bid) / self.offer_change_rate)\n else:\n if o_ask <= self.buy_target:\n quoteprice = o_ask\n else:\n quoteprice = o_bid + ((self.buy_target - o_bid) / self.offer_change_rate)\n if self.job == 'Ask':\n if self.limit >= o_ask:\n return None\n else:\n if len(self.previous_transactions) <= 0: ## has been at least one transaction\n o_bid_minus = (1-self.r_shout_change_relative) * o_bid - self.r_shout_change_absolute\n quoteprice = o_ask - ((o_ask - max(self.limit, o_bid_minus)) / self.offer_change_rate)\n else:\n if o_bid >= self.sell_target:\n 
quoteprice = o_bid\n else:\n quoteprice = o_ask - ((o_ask - self.sell_target) / self.offer_change_rate)\n def imbalancealter (quoteprice_aa, lob):\n if(lob['microprice']==None or lob['midprice']==None): return quoteprice_aa\n quoteprice_iaa = 0\n imbalance_ratio = 0\n volume_bids = 0\n volume_asks = 0\n count_bids_depth = 0\n count_asks_depth = 0\n for item in lob['bids']['lob']:\n volume_bids += math.exp(-0.5*count_bids_depth) *item[1]\n count_bids_depth +=1\n if(count_bids_depth >=2): break\n for item in lob['asks']['lob']:\n volume_asks += math.exp(-0.5*count_asks_depth) *item[1]\n count_asks_depth +=1\n if(count_asks_depth >=2): break\n if volume_bids == 0 and volume_asks == 0:\n return quoteprice_aa\n else :\n imbalance_ratio = (volume_bids-volume_asks)/(volume_bids+volume_asks)\n if self.job == 'Bid':\n quoteprice_iaa = quoteprice_aa+imbalance_ratio*(lob['microprice']-quoteprice_aa)\n if(quoteprice_iaa>self.limit):\n quoteprice_iaa = self.limit\n else:\n quoteprice_iaa = quoteprice_aa+ imbalance_ratio*(quoteprice_aa-lob['microprice'])\n if(quoteprice_iaa<self.limit):\n quoteprice_iaa = self.limit\n\n if count_bids_depth/count_asks_depth >=3 or count_asks_depth/count_bids_depth>=3 :\n return quoteprice_iaa\n else: return quoteprice_aa\n # return quoteprice_aa\n return quoteprice_iaa\n # quoteprice_iaa = imbalancealter(quoteprice,lob)\n\n order = Order(self.tid,\n self.orders[0].atype,\n 'LIM',\n quoteprice,\n self.orders[0].qty,\n time, None, -1)\n self.lastquote=order\n return order\n\n def respond(self, time, lob, trade, verbose):\n ## Begin nicked from ZIP\n\n # what, if anything, has happened on the bid LOB? 
Nicked from ZIP..\n bid_improved = False\n bid_hit = False\n lob_best_bid_p = lob['bids']['bestp']\n lob_best_bid_q = None\n if lob_best_bid_p != None:\n # non-empty bid LOB\n lob_best_bid_q = lob['bids']['lob'][0][1]\n if self.prev_best_bid_p < lob_best_bid_p :\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\n # previous best bid was hit\n bid_hit = True\n elif self.prev_best_bid_p != None:\n # # the bid LOB has been emptied: was it cancelled or hit?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # bid_hit = False\n # else:\n # bid_hit = True\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n bid_hit = True\n else:\n bid_hit = False\n # what, if anything, has happened on the ask LOB?\n ask_improved = False\n ask_lifted = False\n lob_best_ask_p = lob['asks']['bestp']\n lob_best_ask_q = None\n if lob_best_ask_p != None:\n # non-empty ask LOB\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if self.prev_best_ask_p > lob_best_ask_p :\n # best ask has improved -- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\n ask_lifted = True\n elif self.prev_best_ask_p != None:\n # the ask LOB is empty now but was not previously: canceled or lifted?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # ask_lifted = False\n # else:\n # ask_lifted = 
True\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n ask_lifted = True\n else:\n ask_lifted = False\n\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_bid_q = lob_best_bid_q\n self.prev_best_ask_p = lob_best_ask_p\n self.prev_best_ask_q = lob_best_ask_q\n\n deal = bid_hit or ask_lifted\n\n ## End nicked from ZIP\n\n if deal:\n self.previous_transactions.append(trade['price'])\n if self.sell_target == None:\n self.sell_target = trade['price']\n if self.buy_target == None:\n self.buy_target = trade['price']\n self.calcEq()\n self.calcAlpha()\n self.calcTheta()\n self.calcRshout()\n self.calcAgg()\n self.calcTarget()\n #print 'sell: ', self.sell_target, 'buy: ', self.buy_target, 'limit:', self.limit, 'eq: ', self.estimated_equilibrium[-1], 'sell_r: ', self.sell_r, 'buy_r: ', self.buy_r, '\\n'\n\n\nclass Trader_OAA(Trader):\n\n def __init__(self, ttype, tid, balance, time):\n # Stuff about trader\n # self.ttype = ttype\n # self.tid = tid\n # self.balance = balance\n # self.birthtime = time\n # self.profitpertime = 0\n # self.n_trades = 0\n # self.blotter = []\n # self.orders = []\n # self.n_quotes = 0\n # self.lastquote = None\n Trader.__init__(self, ttype, tid, balance, time)\n\n self.limit = None\n self.job = None\n\n # learning variables\n self.r_shout_change_relative = 0.05\n self.r_shout_change_absolute = 0.05\n self.short_term_learning_rate = random.uniform(0.1, 0.5)\n self.long_term_learning_rate = random.uniform(0.1, 0.5)\n self.moving_average_weight_decay = 0.95 # how fast weight decays with time, lower is quicker, 0.9 in vytelingum\n self.moving_average_window_size = 5\n self.offer_change_rate = 3.0\n self.theta = -2.0\n self.theta_max = 2.0\n self.theta_min = -8.0\n self.marketMax = bse_sys_maxprice\n\n # Variables to describe the market\n self.previous_transactions = []\n 
self.moving_average_weights = []\n for i in range(self.moving_average_window_size):\n self.moving_average_weights.append(self.moving_average_weight_decay**i)\n self.estimated_equilibrium = []\n self.smiths_alpha = []\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n\n # Trading Variables\n self.r_shout = None\n self.buy_target = None\n self.sell_target = None\n self.buy_r = -1.0 * (0.3 * random.random())\n self.sell_r = -1.0 * (0.3 * random.random())\n\n\n\n def calcEq(self):\n # Slightly modified from paper, it is unclear inpaper\n # N previous transactions * weights / N in vytelingum, swap N denominator for sum of weights to be correct?\n if len(self.previous_transactions) == 0:\n return\n elif len(self.previous_transactions) < self.moving_average_window_size:\n # Not enough transactions\n self.estimated_equilibrium.append(float(sum(self.previous_transactions)) / max(len(self.previous_transactions), 1))\n else:\n N_previous_transactions = self.previous_transactions[-self.moving_average_window_size:]\n thing = [N_previous_transactions[i]*self.moving_average_weights[i] for i in range(self.moving_average_window_size)]\n eq = sum( thing ) / sum(self.moving_average_weights)\n self.estimated_equilibrium.append(eq)\n\n def calcAlpha(self):\n alpha = 0.0\n for p in self.estimated_equilibrium:\n alpha += (p - self.estimated_equilibrium[-1])**2\n alpha = math.sqrt(alpha/len(self.estimated_equilibrium))\n self.smiths_alpha.append( alpha/self.estimated_equilibrium[-1] )\n\n def calcTheta(self):\n gamma = 2.0 #not sensitive apparently so choose to be whatever\n # necessary for intialisation, div by 0\n if min(self.smiths_alpha) == max(self.smiths_alpha):\n alpha_range = 0.4 #starting value i guess\n else:\n alpha_range = (self.smiths_alpha[-1] - min(self.smiths_alpha)) / (max(self.smiths_alpha) - min(self.smiths_alpha))\n theta_range = self.theta_max - self.theta_min\n desired_theta = self.theta_min + 
(theta_range) * (1 - (alpha_range * math.exp(gamma * (alpha_range - 1))))\n self.theta = self.theta + self.long_term_learning_rate * (desired_theta - self.theta)\n\n def calcRshout(self):\n p = self.estimated_equilibrium[-1]\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n # Currently a buyer\n if l <= p: #extramarginal!\n self.r_shout = 0.0\n else: #intramarginal :(\n if self.buy_target > self.estimated_equilibrium[-1]:\n #r[0,1]\n self.r_shout = math.log(((self.buy_target - p) * (math.exp(theta) - 1) / (l - p)) + 1) / theta\n else:\n #r[-1,0]\n self.r_shout = math.log((1 - (self.buy_target/p)) * (math.exp(theta) - 1) + 1) / theta\n\n\n if self.job == 'Ask':\n # Currently a seller\n if l >= p: #extramarginal!\n self.r_shout = 0\n else: #intramarginal :(\n if self.sell_target > self.estimated_equilibrium[-1]:\n # r[-1,0]\n self.r_shout = math.log((self.sell_target - p) * (math.exp(theta) - 1) / (self.marketMax - p) + 1) / theta\n else:\n # r[0,1]\n a = (self.sell_target-l)/(p-l)\n self.r_shout = (math.log((1 - a) * (math.exp(theta) - 1) + 1)) / theta\n\n def calcAgg(self):\n delta = 0\n if self.job == 'Bid':\n # BUYER\n if self.buy_target >= self.previous_transactions[-1] :\n # must be more aggressive\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.buy_r = self.buy_r + self.short_term_learning_rate * (delta - self.buy_r)\n\n if self.job == 'Ask':\n # SELLER\n if self.sell_target > self.previous_transactions[-1] :\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.sell_r = self.sell_r + self.short_term_learning_rate * (delta - self.sell_r)\n\n def calcTarget(self):\n if len(self.estimated_equilibrium) > 0:\n p = self.estimated_equilibrium[-1]\n if self.limit == p:\n p = p 
* 1.000001 # to prevent theta_bar = 0\n elif self.job == 'Bid':\n p = self.limit - self.limit * 0.2 ## Initial guess for eq if no deals yet!!....\n elif self.job == 'Ask':\n p = self.limit + self.limit * 0.2\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n #BUYER\n minus_thing = (math.exp(-self.buy_r * theta) - 1) / (math.exp(theta) - 1)\n plus_thing = (math.exp(self.buy_r * theta) - 1) / (math.exp(theta) - 1)\n theta_bar = (theta * l - theta * p) / p\n if theta_bar == 0:\n theta_bar = 0.0001\n if math.exp(theta_bar) - 1 == 0:\n theta_bar = 0.0001\n bar_thing = (math.exp(-self.buy_r * theta_bar) - 1) / (math.exp(theta_bar) - 1)\n if l <= p: #Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l * (1 - minus_thing)\n else: #intramarginal\n if self.buy_r >= 0:\n self.buy_target = p + (l-p)*plus_thing\n else:\n self.buy_target = p*(1-bar_thing)\n if self.buy_target > l:\n self.buy_target = l\n\n if self.job == 'Ask':\n #SELLER\n minus_thing = (math.exp(-self.sell_r * theta) - 1) / (math.exp(theta) - 1)\n plus_thing = (math.exp(self.sell_r * theta) - 1) / (math.exp(theta) - 1)\n theta_bar = (theta * l - theta * p) / p\n if theta_bar == 0:\n theta_bar = 0.0001\n if math.exp(theta_bar) - 1 == 0:\n theta_bar = 0.0001\n bar_thing = (math.exp(-self.sell_r * theta_bar) - 1) / (math.exp(theta_bar) - 1) #div 0 sometimes what!?\n if l <= p: #Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l + (self.marketMax - l)*(minus_thing)\n else: #intramarginal\n if self.buy_r >= 0:\n self.buy_target = l + (p-l)*(1-plus_thing)\n else:\n self.buy_target = p + (self.marketMax - p)*(bar_thing)\n if self.sell_target < l:\n self.sell_target = l\n\n def getorder(self, time, countdown, lob,verbose):\n if len(self.orders) < 1:\n self.active = False\n return None\n else:\n self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].atype\n self.calcTarget()\n\n if self.prev_best_bid_p == 
None:\n o_bid = 0\n else:\n o_bid = self.prev_best_bid_p\n if self.prev_best_ask_p == None:\n o_ask = self.marketMax\n else:\n o_ask = self.prev_best_ask_p\n\n if self.job == 'Bid': #BUYER\n if self.limit <= o_bid:\n return None\n else:\n if len(self.previous_transactions) > 0: ## has been at least one transaction\n\n o_ask_plus = (1+self.r_shout_change_relative)*o_ask + self.r_shout_change_absolute\n quoteprice = o_bid + ((min(self.limit, o_ask_plus) - o_bid) / self.offer_change_rate)\n else:\n\n if o_ask <= self.buy_target:\n quoteprice = o_ask\n else:\n quoteprice = o_bid + ((self.buy_target - o_bid) / self.offer_change_rate)\n if self.job == 'Ask':\n if self.limit >= o_ask:\n return None\n else:\n if len(self.previous_transactions) > 0: ## has been at least one transaction\n o_bid_minus = (1-self.r_shout_change_relative) * o_bid - self.r_shout_change_absolute\n quoteprice = o_ask - ((o_ask - max(self.limit, o_bid_minus)) / self.offer_change_rate)\n else:\n if o_bid >= self.sell_target:\n quoteprice = o_bid\n else:\n quoteprice = o_ask - ((o_ask - self.sell_target) / self.offer_change_rate)\n\n\n order = Order(self.tid,\n self.orders[0].atype,\n 'LIM',\n quoteprice,\n self.orders[0].qty,\n time, None, -1)\n self.lastquote=order\n return order\n\n def respond(self, time, lob, trade, verbose):\n ## Begin nicked from ZIP\n\n # what, if anything, has happened on the bid LOB? 
Nicked from ZIP..\n bid_improved = False\n bid_hit = False\n lob_best_bid_p = lob['bids']['bestp']\n lob_best_bid_q = None\n if lob_best_bid_p != None:\n # non-empty bid LOB\n lob_best_bid_q = lob['bids']['lob'][0][1]\n if self.prev_best_bid_p < lob_best_bid_p :\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\n # previous best bid was hit\n bid_hit = True\n elif self.prev_best_bid_p != None:\n # # the bid LOB has been emptied: was it cancelled or hit?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # bid_hit = False\n # else:\n # bid_hit = True\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n bid_hit = True\n else:\n bid_hit = False\n # what, if anything, has happened on the ask LOB?\n ask_improved = False\n ask_lifted = False\n lob_best_ask_p = lob['asks']['bestp']\n lob_best_ask_q = None\n if lob_best_ask_p != None:\n # non-empty ask LOB\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if self.prev_best_ask_p > lob_best_ask_p :\n # best ask has improved -- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\n ask_lifted = True\n elif self.prev_best_ask_p != None:\n # the ask LOB is empty now but was not previously: canceled or lifted?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # ask_lifted = False\n # else:\n # ask_lifted = 
True\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n ask_lifted = True\n else:\n ask_lifted = False\n\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_bid_q = lob_best_bid_q\n self.prev_best_ask_p = lob_best_ask_p\n self.prev_best_ask_q = lob_best_ask_q\n\n deal = bid_hit or ask_lifted\n\n ## End nicked from ZIP\n\n if deal:\n self.previous_transactions.append(trade['price'])\n if self.sell_target == None:\n self.sell_target = trade['price']\n if self.buy_target == None:\n self.buy_target = trade['price']\n self.calcEq()\n self.calcAlpha()\n self.calcTheta()\n self.calcRshout()\n self.calcAgg()\n self.calcTarget()\n #print 'sell: ', self.sell_target, 'buy: ', self.buy_target, 'limit:', self.limit, 'eq: ', self.estimated_equilibrium[-1], 'sell_r: ', self.sell_r, 'buy_r: ', self.buy_r, '\\n'\n\nclass Trader_IAAB(Trader):\n\n def __init__(self, ttype, tid, balance, time):\n # Stuff about trader\n # self.ttype = ttype\n # self.tid = tid\n # self.balance = balance\n # self.birthtime = time\n # self.profitpertime = 0\n # self.n_trades = 0\n # self.blotter = []\n # self.orders = []\n # self.n_quotes = 0\n # self.lastquote = None\n Trader.__init__(self, ttype, tid, balance, time)\n\n self.limit = None\n self.job = None\n\n # learning variables\n self.r_shout_change_relative = 0.05\n self.r_shout_change_absolute = 0.05\n self.short_term_learning_rate = random.uniform(0.1, 0.5)\n self.long_term_learning_rate = random.uniform(0.1, 0.5)\n self.moving_average_weight_decay = 0.95 # how fast weight decays with time, lower is quicker, 0.9 in vytelingum\n self.moving_average_window_size = 5\n self.offer_change_rate = 3.0\n self.theta = -2.0\n self.theta_max = 2.0\n self.theta_min = -8.0\n self.marketMax = bse_sys_maxprice\n\n # Variables to describe the market\n self.previous_transactions = []\n 
self.moving_average_weights = []\n for i in range(self.moving_average_window_size):\n self.moving_average_weights.append(self.moving_average_weight_decay**i)\n self.estimated_equilibrium = []\n self.smiths_alpha = []\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n\n # Trading Variables\n self.r_shout = None\n self.buy_target = None\n self.sell_target = None\n self.buy_r = -1.0 * (0.3 * random.random())\n self.sell_r = -1.0 * (0.3 * random.random())\n\n\n #Block order holder\n self.remaining_quantity = 0;\n\n\n def add_cust_order(self, order, verbose):\n # add a customer order to trader's records\n # currently LAZY: keeps to within max_cust_orders by appending new order and deleting head self.orders\n if len(self.quotes) > 0:\n # this trader has a live quote on the LOB, from a previous customer order\n # need response to signal cancellation/withdrawal of that quote\n response = 'LOB_Cancel'\n else:\n response = 'Proceed'\n if len(self.orders) >= self.max_cust_orders:\n self.orders = self.orders[1:]\n self.orders.append(order)\n\n self.remaining_quantity = order.qty\n\n if verbose: print('add_order < response=%s self.orders=%s' % (response, str(self.orders)))\n return response\n\n # delete a customer order from trader's list of orders being worked\n\n def del_cust_order(self, cust_order_id, verbose):\n if verbose:\n print('>del_cust_order: Cust_orderID=%s; self.orders=' % cust_order_id)\n for o in self.orders: print('%s ' % str(o))\n\n cust_orders = []\n for co in self.orders:\n if co.assignmentid != cust_order_id: cust_orders.append(co)\n\n self.orders = cust_orders\n\n # revise a customer order: used after a PARTial fill on the exchange\n\n def revise_cust_order(self, cust_order_id, revised_order, verbose):\n if verbose:\n print('>revise_cust_order: Cust_orderID=%s; revised_order=%s, self.orders=' % (\n cust_order_id, revised_order))\n for o in self.orders: print('%s ' % str(o))\n\n 
cust_orders = []\n for co in self.orders:\n if co.assignmentid != cust_order_id:\n cust_orders.append(co)\n else:\n revised_assignment = co\n revised_assignment.qty = self.remaining_quantity\n cust_orders.append(revised_assignment)\n\n self.orders = cust_orders\n\n if verbose:\n print('<revise_cust_order: Cust_orderID=%s; revised_order=%s, self.orders=' % (\n cust_order_id, revised_order))\n for o in self.orders: print('%s ' % str(o))\n\n # delete an order/quote from the trader's list of its orders live on the exchange\n\n def del_exch_order(self, oid, verbose):\n if verbose:\n print('>del_exch_order: OID:%d; self.quotes=' % oid)\n for q in self.quotes: print('%s ' % str(q))\n\n exch_orders = []\n for eo in self.quotes:\n if eo.orderid != oid: exch_orders.append(eo)\n\n self.quotes = exch_orders\n\n def bookkeep(self, msg, time, verbose):\n # bookkeep(): trader book-keeping in response to message from the exchange\n # update records of what orders are still being worked, account balance, etc.\n # trader's blotter is a simple sequential record of each exchange messages received, and the trader's balance after bookeeping that msh\n\n if verbose: print('>bookkeep msg=%s bal=%d' % (msg, self.balance))\n\n profit = 0\n\n if msg.event == \"CAN\":\n # order was cancelled at the exchange\n # so delete the order from the trader's records of what quotes it has live on the exchange\n if verbose:\n print(\">CANcellation: msg=%s quotes=\" % str(msg))\n for q in self.quotes: print(\"%s\" % str(q))\n\n newquotes = []\n for q in self.quotes:\n if q.orderid != msg.oid:\n newquotes.append(q)\n self.quotes = newquotes\n\n if verbose:\n print(\"<CANcellation: quotes=\")\n for q in self.quotes: print(\"%s\" % str(q))\n\n # an individual order of some types (e.g. 
MKT) can fill via transactions at different prices\n # so the message that comes back from the exchange has transaction data in a list: will often be length=1\n\n if msg.event == \"FILL\" or msg.event == \"PART\":\n\n for trans in msg.trns:\n transactionprice = trans[\"Price\"]\n qty = trans[\"Qty\"]\n\n\n self. remaining_quantity = self.remaining_quantity - qty;\n\n\n # find this LOB order in the trader's list of quotes sent to exchange\n exch_order = None\n for ord in self.quotes:\n if ord.orderid == msg.oid:\n exch_order = ord\n break\n if exch_order == None:\n s = 'FAIL: bookkeep() cant find order (msg.oid=%d) orders=' % msg.oid\n for ord in self.quotes: s = s + str(ord)\n sys.exit(s)\n\n cust_order_id = exch_order.myref\n cust_order = None\n for assignment in self.orders:\n if assignment.assignmentid == cust_order_id:\n cust_order = assignment\n break\n\n limitprice = cust_order.price\n\n if exch_order.otype == 'Bid':\n profit = (limitprice - transactionprice) * qty\n else:\n profit = (transactionprice - limitprice) * qty\n\n\n self.balance += profit\n\n print 'IAAB\\'s transaction quantity = '\n print qty\n print 'IAAB\\'s transaction price = '\n print transactionprice\n print 'IAAB\\'s profit = '\n print profit\n print 'IAAB\\'s balance = '\n print self.balance\n print 'IAAB\\'s remaining quantity = '\n print self.remaining_quantity\n\n\n\n self.n_trades += 1\n age = time - self.birthtime\n self.profitpertime = self.balance / age\n\n if verbose: print('Price=%d Limit=%d Q=%d Profit=%d N_trades=%d Age=%f Balance=%d' %\n (transactionprice, limitprice, qty, profit, self.n_trades, age,\n self.balance))\n\n if profit < 0:\n print self.tid\n print self.ttype\n print profit\n print exch_order\n sys.exit('Exit: Negative profit')\n\n if verbose: print('%s: profit=%d bal=%d profit/time=%f' %\n (self.tid, profit, self.balance, self.profitpertime))\n\n # by the time we get to here, exch_order is instantiated\n cust_order_id = exch_order.myref\n\n if msg.event == 
\"FILL\":\n # this order has completed in full, so it thereby completes the corresponding customer order\n # so delete both the customer order from trader's record of those\n # and the order has already been deleted from the exchange's records, so also needs to be deleted from trader's records of orders held at exchange\n cust_order_id = exch_order.myref\n\n if(self.remaining_quantity<=0):\n self.del_cust_order(cust_order_id, verbose) # delete this customer order\n\n\n\n\n self.del_exch_order(exch_order.orderid,\n verbose) # delete the exchange-order from trader's records\n\n elif msg.event == \"PART\":\n # the customer order is still live, but its quantity needs updating\n if verbose: print(\n '>bookkeep() PART-filled order updating qty on customer order ID=%s' % cust_order_id)\n self.revise_cust_order(cust_order_id, msg.revo, verbose) # delete this customer order\n\n if exch_order.ostyle == \"IOC\":\n # a partially filled IOC has the non-filled portion cancelled at the exchange,\n # so the trader's order records need to be updated accordingly\n if verbose: print(\n \">bookkeep() PART-filled IOC cancels remainder: deleting OID:%d from trader's exchange-order records\" % exch_order.orderid)\n self.del_exch_order(exch_order.orderid,\n verbose) # delete the exchange-order from trader's records\n\n self.blotter.append([msg, self.balance]) # add trade record to trader's blotter\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n def calcEq(self): ##clear and correct\n # Slightly modified from paper, it is unclear inpaper\n # N previous transactions * weights / N in vytelingum, swap N denominator for sum of weights to be correct?\n if len(self.previous_transactions) == 0:\n return\n elif len(self.previous_transactions) < self.moving_average_window_size:\n # Not enough transactions\n self.estimated_equilibrium.append(float(sum(self.previous_transactions)) / max(len(self.previous_transactions), 1))\n else:\n N_previous_transactions = 
self.previous_transactions[-self.moving_average_window_size:]\n thing = [N_previous_transactions[i]*self.moving_average_weights[i] for i in range(self.moving_average_window_size)]\n eq = sum( thing ) / sum(self.moving_average_weights)\n self.estimated_equilibrium.append(eq)\n\n def calcAlpha(self): ##correct. but calcAlpha in snashall's version is incorrect\n alpha = 0.0\n for p in self.previous_transactions:\n alpha += (p - self.estimated_equilibrium[-1])**2\n alpha = math.sqrt(alpha/len(self.previous_transactions))\n self.smiths_alpha.append( alpha/self.estimated_equilibrium[-1] )\n\n def calcTheta(self): ## clear and correct\n gamma = 2.0 #not sensitive apparently so choose to be whatever\n # necessary for intialisation, div by 0\n if min(self.smiths_alpha) == max(self.smiths_alpha):\n alpha_range = 0.4 #starting value i guess\n else:\n alpha_range = (self.smiths_alpha[-1] - min(self.smiths_alpha)) / (max(self.smiths_alpha) - min(self.smiths_alpha))\n theta_range = self.theta_max - self.theta_min\n desired_theta = self.theta_min + (theta_range) * (1 - alpha_range) * math.exp(gamma * (alpha_range - 1))\n self.theta = self.theta + self.long_term_learning_rate * (desired_theta - self.theta)\n if self.theta > self.theta_max :\n self.theta = self.theta_max\n if self.theta < self.theta_min :\n self.theta = self.theta_min\n\n def calcRshout(self): ## unclear in Vytelingum's paper\n p = self.estimated_equilibrium[-1]\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n # Currently a buyer\n if l <= p: #extramarginal!\n self.r_shout = 0.0\n else: #intramarginal :(\n if self.buy_target > self.estimated_equilibrium[-1]:\n #r[0,1]\n self.r_shout = math.log(((self.buy_target - p) * (math.exp(theta) - 1) / (l - p)) + 1) / theta\n else:\n #r[-1,0]\n # print 'buy_target: %f , p: %f , theta: %f' %(self.buy_target,p,theta)\n self.r_shout = math.log((1 - (self.buy_target/p)) * (math.exp(theta) - 1) + 1) / theta\n # self.r_shout = self.buy_r\n\n\n if self.job == 'Ask':\n 
# Currently a seller\n if l >= p: #extramarginal!\n self.r_shout = 0\n else: #intramarginal :(\n if self.sell_target > self.estimated_equilibrium[-1]:\n # r[-1,0]\n self.r_shout = math.log((self.sell_target - p) * (math.exp(theta) - 1) / (self.marketMax - p) + 1) / theta\n else:\n # r[0,1]\n a = (self.sell_target-l)/(p-l)\n self.r_shout = (math.log((1 - a) * (math.exp(theta) - 1) + 1)) / theta\n # self.r_shout = self.sell_r\n\n def calcAgg(self):\n delta = 0\n if self.job == 'Bid':\n # BUYER\n if self.buy_target >= self.previous_transactions[-1] :\n # must be more aggressive\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.buy_r = self.buy_r + self.short_term_learning_rate * (delta - self.buy_r)\n\n if self.job == 'Ask':\n # SELLER\n if self.sell_target > self.previous_transactions[-1] :\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\n else :\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\n\n self.sell_r = self.sell_r + self.short_term_learning_rate * (delta - self.sell_r)\n\n def calcTarget(self):\n if len(self.estimated_equilibrium) > 0:\n p = self.estimated_equilibrium[-1]\n if self.limit == p:\n p = p * 1.000001 # to prevent theta_bar = 0\n elif self.job == 'Bid':\n p = self.limit - self.limit * 0.2 ## Initial guess for eq if no deals yet!!....\n elif self.job == 'Ask':\n p = self.limit + self.limit * 0.2\n l = self.limit\n theta = self.theta\n if self.job == 'Bid':\n #BUYER\n minus_thing = self.buy_r * math.exp(theta*(self.buy_r-1))\n\n if l <= p: #Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l * (1 - minus_thing)\n else: #intramarginal\n if self.buy_r >= 0:\n # theta_ba = (p * math.exp(-theta))/(l-p)-1\n theta_ba = theta\n # print 'theta: %f' %(self.theta)\n # print 'theta_ba: %f '%(theta_ba)\n # 
print 'l-p: %f '%(l-p)\n # print 'self.buy_r :%f' %(self.buy_r)\n\n self.buy_target = (l-p)*(1-(self.buy_r+1)*math.exp(self.buy_r*theta_ba))+p\n else:\n self.buy_target = p*(1-minus_thing)\n if self.buy_target > l:\n self.buy_target = l\n if self.buy_target <bse_sys_minprice :\n self.buy_target = bse_sys_minprice\n # print 'buy_target = %f'%(self.buy_target)\n\n if self.job == 'Ask':\n #SELLER\n\n if l <= p: #Intramarginal\n if self.buy_r >= 0:\n self.buy_target = p + (p-l)* self.sell_r*math.exp((self.sell_r-1)*theta)\n else:\n theta_ba = math.log((self.marketMax-p)/(p-l))-theta\n self.buy_target = p + (self.marketMax-p)* self.sell_r*math.exp((self.sell_r+1)*theta_ba)\n else: # Extramarginal\n if self.buy_r >= 0:\n self.buy_target = l\n else:\n self.buy_target = l + (self.marketMax-l)*self.sell_r*math.exp((self.sell_r-1)*theta)\n if self.sell_target < l:\n self.sell_target = l\n if self.sell_target > bse_sys_maxprice:\n self.sell_target = bse_sys_maxprice\n # print 'sell_target = %f'%(self.sell_target)\n\n def getorder(self, time, countdown, lob,verbose):\n if len(self.orders) < 1:\n self.active = False\n return None\n else:\n self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].atype\n self.calcTarget()\n\n if self.prev_best_bid_p == None:\n o_bid = 0\n else:\n o_bid = self.prev_best_bid_p\n if self.prev_best_ask_p == None:\n o_ask = self.marketMax\n else:\n o_ask = self.prev_best_ask_p\n\n if self.job == 'Bid': #BUYER\n if self.limit <= o_bid:\n return None\n else:\n if len(self.previous_transactions) > 0: ## has been at least one transaction\n o_ask_plus = (1+self.r_shout_change_relative)*o_ask + self.r_shout_change_absolute\n quoteprice = o_bid + ((min(self.limit, o_ask_plus) - o_bid) / self.offer_change_rate)\n else:\n if o_ask <= self.buy_target:\n quoteprice = o_ask\n else:\n quoteprice = o_bid + ((self.buy_target - o_bid) / self.offer_change_rate)\n if self.job == 'Ask':\n if self.limit >= o_ask:\n return None\n else:\n if 
len(self.previous_transactions) <= 0: ## has been at least one transaction\n o_bid_minus = (1-self.r_shout_change_relative) * o_bid - self.r_shout_change_absolute\n quoteprice = o_ask - ((o_ask - max(self.limit, o_bid_minus)) / self.offer_change_rate)\n else:\n if o_bid >= self.sell_target:\n quoteprice = o_bid\n else:\n quoteprice = o_ask - ((o_ask - self.sell_target) / self.offer_change_rate)\n def imbalance_alter (quoteprice_aa, lob):\n\n if(lob['microprice']==None or lob['midprice']==None):\n return quoteprice_aa\n\n quoteprice_iaa = 0\n imbalance_ratio = 0\n # the measurement of size in demand side\n volume_bids = 0\n # the measurement of size in supply side\n volume_asks = 0\n #the depth of lob, how many different price.\n count_bids_depth = 0\n count_asks_depth = 0\n for item in lob['bids']['lob']:\n volume_bids += math.exp(-0.5*count_bids_depth) *item[1]\n count_bids_depth +=1\n #only consider the first 3 quota\n if(count_bids_depth >=3): break\n for item in lob['asks']['lob']:\n volume_asks += math.exp(-0.5*count_asks_depth) *item[1]\n count_asks_depth +=1\n #only consider the first 3 quota\n if(count_asks_depth >=3): break\n #lob is none\n if volume_bids == 0 and volume_asks == 0:\n return quoteprice_aa\n else :\n # imbalance_ratio will be treated as learning rate in widrow\n imbalance_ratio = (volume_bids-volume_asks)/(volume_bids+volume_asks)\n if self.job == 'Bid':\n quoteprice_iaa = quoteprice_aa+imbalance_ratio*(lob['microprice']-lob['midprice'])\n if(quoteprice_iaa>self.limit):\n quoteprice_iaa = self.limit\n else:\n quoteprice_iaa = quoteprice_aa+ imbalance_ratio*(lob['microprice']-lob['midprice'])\n if(quoteprice_iaa<self.limit):\n quoteprice_iaa = self.limit\n # return quoteprice_aa\n # if count_bids_depth/count_asks_depth >=3 or count_asks_depth/count_bids_depth>=3 :\n # return quoteprice_iaa\n # else: return quoteprice_aa\n # # return quoteprice_aa\n # print 'depth_bids: %f' %(count_bids_depth)\n # print 'depth_asks: %f' %(count_asks_depth)\n # 
print 'volume_bids: %f' %(volume_bids)\n # print 'volume_asks: %f' %(volume_asks)\n # print 'imbalance ratio: %f' %(imbalance_ratio)\n # print 'IAA original quotaprice: %d' % (quoteprice_aa)\n # print 'IAA final quotaprice: %d' % (quoteprice_iaa)\n return quoteprice_iaa\n\n quoteprice_iaa = imbalance_alter(quoteprice,lob)\n\n def divide_block_order ():\n return min(random.randint(1,3),self.remaining_quantity)\n order = Order(self.tid,\n self.orders[0].atype,\n 'LIM',\n quoteprice_iaa,\n divide_block_order(),\n time, None, -1)\n self.lastquote=order\n\n print 'IAAB deal with block order'\n print str(order)\n return order\n\n def respond(self, time, lob, trade, verbose):\n ## Begin nicked from ZIP\n\n # what, if anything, has happened on the bid LOB? Nicked from ZIP..\n bid_improved = False\n bid_hit = False\n lob_best_bid_p = lob['bids']['bestp']\n lob_best_bid_q = None\n if lob_best_bid_p != None:\n # non-empty bid LOB\n lob_best_bid_q = lob['bids']['lob'][0][1]\n if self.prev_best_bid_p < lob_best_bid_p :\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\n # previous best bid was hit\n bid_hit = True\n elif self.prev_best_bid_p != None:\n # # the bid LOB has been emptied: was it cancelled or hit?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # bid_hit = False\n # else:\n # bid_hit = True\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n bid_hit = True\n else:\n bid_hit = False\n # what, if anything, has happened on the ask LOB?\n ask_improved = False\n ask_lifted = False\n lob_best_ask_p = lob['asks']['bestp']\n lob_best_ask_q = None\n if lob_best_ask_p != None:\n 
# non-empty ask LOB\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if self.prev_best_ask_p > lob_best_ask_p :\n # best ask has improved -- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\n ask_lifted = True\n elif self.prev_best_ask_p != None:\n # the ask LOB is empty now but was not previously: canceled or lifted?\n # last_tape_item = lob['tape'][-1]\n # if last_tape_item['type'] == 'Cancel' :\n # ask_lifted = False\n # else:\n # ask_lifted = True\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\n if trade != None:\n # a trade has occurred and the previously nonempty ask LOB is now empty\n # so assume best ask was lifted\n ask_lifted = True\n else:\n ask_lifted = False\n\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_bid_q = lob_best_bid_q\n self.prev_best_ask_p = lob_best_ask_p\n self.prev_best_ask_q = lob_best_ask_q\n\n deal = bid_hit or ask_lifted\n\n ## End nicked from ZIP\n\n if deal:\n self.previous_transactions.append(trade['price'])\n if self.sell_target == None:\n self.sell_target = trade['price']\n if self.buy_target == None:\n self.buy_target = trade['price']\n self.calcEq()\n self.calcAlpha()\n self.calcTheta()\n self.calcRshout()\n self.calcAgg()\n self.calcTarget()\n #print 'sell: ', self.sell_target, 'buy: ', self.buy_target, 'limit:', self.limit, 'eq: ', self.estimated_equilibrium[-1], 'sell_r: ', self.sell_r, 'buy_r: ', self.buy_r, '\\n'\n\n\n" }, { "alpha_fraction": 0.4231961667537689, "alphanum_fraction": 0.43182769417762756, "avg_line_length": 47.0987663269043, "blob_id": "792b8821f823b574ce18cb2ae2debb545fb04c67", "content_id": "1cf751a55b218d1d2882cdae712d3767efddad1c", "detected_licenses": [ "MIT" 
], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11933, "license_type": "permissive", "max_line_length": 224, "num_lines": 243, "path": "/ZhenZhang/source/GDX.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# Trader subclass ZIP\r\n# After Cliff 1997\r\n\r\n\r\nfrom BSE2_msg_classes import Assignment, Order, Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\nclass Trader_GDX(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n self.prev_orders = []\r\n self.active = False\r\n self.limit = None\r\n self.job = None\r\n\r\n\r\n\r\n #memory of all bids and asks and accepted bids and asks\r\n self.outstanding_bids = []\r\n self.outstanding_asks = []\r\n self.accepted_asks = []\r\n self.accepted_bids = []\r\n\r\n self.price = -1\r\n\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n self.first_turn = True\r\n\r\n self.gamma = 0.1\r\n\r\n self.holdings = 10\r\n self.remaining_offer_ops = 10\r\n self.values = [[0 for n in range(self.remaining_offer_ops)] for m in range(self.holdings)]\r\n\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].atype\r\n\r\n #calculate price\r\n if self.job == 'Bid':\r\n self.price = self.calc_p_bid(self.holdings - 1, self.remaining_offer_ops - 1)\r\n if self.job == 'Ask':\r\n self.price = self.calc_p_ask(self.holdings - 1, self.remaining_offer_ops - 1)\r\n\r\n order = 
Order(self.tid, self.job, 'LIM',self.price, self.orders[0].qty, time, None, -1)\r\n self.lastquote = order\r\n\r\n if self.first_turn or self.price == -1:\r\n if self.job == 'Bid':\r\n order = Order(self.tid, self.job, 'LIM',bse_sys_minprice+1 , self.orders[0].qty, time, None, -1)\r\n if self.job == 'Ask':\r\n order = Order(self.tid, self.job, 'LIM',bse_sys_maxprice-1 , self.orders[0].qty, time, None, -1)\r\n\r\n\r\n return order\r\n\r\n def calc_p_bid(self, m, n):\r\n best_return = 0\r\n best_bid = 0\r\n second_best_return = 0\r\n second_best_bid = 0\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n thing = self.belief_buy(i) * ((self.limit - i) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_bid = best_bid\r\n second_best_return = best_return\r\n best_return = thing\r\n best_bid = i\r\n\r\n #always best bid largest one\r\n if second_best_bid > best_bid:\r\n a = second_best_bid\r\n second_best_bid = best_bid\r\n best_bid = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_bid), int(best_bid))]:\r\n thing = self.belief_buy(i + second_best_bid) * ((self.limit - (i + second_best_bid)) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i + second_best_bid) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_bid = i + second_best_bid\r\n\r\n return best_bid\r\n\r\n def calc_p_ask(self, m, n):\r\n best_return = 0\r\n best_ask = self.limit\r\n second_best_return = 0\r\n second_best_ask = self.limit\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n j = i + self.limit\r\n thing = self.belief_sell(j) * ((j - self.limit) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(j) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_ask = best_ask\r\n 
second_best_return = best_return\r\n best_return = thing\r\n best_ask = j\r\n #always best ask largest one\r\n if second_best_ask > best_ask:\r\n a = second_best_ask\r\n second_best_ask = best_ask\r\n best_ask = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_ask), int(best_ask))]:\r\n thing = self.belief_sell(i + second_best_ask) * (((i + second_best_ask) - self.limit) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(i + second_best_ask) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_ask = i + second_best_ask\r\n\r\n return best_ask\r\n\r\n def belief_sell(self, price):\r\n accepted_asks_greater = 0\r\n bids_greater = 0\r\n unaccepted_asks_lower = 0\r\n for p in self.accepted_asks:\r\n if p >= price:\r\n accepted_asks_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_bids]:\r\n if p >= price:\r\n bids_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n unaccepted_asks_lower += 1\r\n\r\n if accepted_asks_greater + bids_greater + unaccepted_asks_lower == 0:\r\n return 0\r\n return (accepted_asks_greater + bids_greater) / (accepted_asks_greater + bids_greater + unaccepted_asks_lower)\r\n\r\n def belief_buy(self, price):\r\n accepted_bids_lower = 0\r\n asks_lower = 0\r\n unaccepted_bids_greater = 0\r\n for p in self.accepted_bids:\r\n if p <= price:\r\n accepted_bids_lower += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n asks_lower += 1\r\n for p in [thing[0] for thing in self.outstanding_bids]:\r\n if p >= price:\r\n unaccepted_bids_greater += 1\r\n if accepted_bids_lower + asks_lower + unaccepted_bids_greater == 0:\r\n return 0\r\n return (accepted_bids_lower + asks_lower) / (accepted_bids_lower + asks_lower + unaccepted_bids_greater)\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n # what, if anything, has happened on the bid LOB?\r\n self.outstanding_bids = 
lob['bids']['lob']\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['bestp']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n self.accepted_bids.append(self.prev_best_bid_p)\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n self.outstanding_asks = lob['asks']['lob']\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['bestp']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n self.accepted_asks.append(self.prev_best_ask_p)\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n\r\n #populate expected 
values\r\n if self.first_turn:\r\n # print \"populating\"\r\n self.first_turn = False\r\n for n in range(1, self.remaining_offer_ops):\r\n for m in range(1, self.holdings):\r\n if self.job == 'Bid':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_bid(m, n)\r\n\r\n if self.job == 'Ask':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_ask(m, n)\r\n # print \"done\"\r\n\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n" }, { "alpha_fraction": 0.431112140417099, "alphanum_fraction": 0.43954962491989136, "avg_line_length": 47.52207565307617, "blob_id": "86b085ddd8ba6e79139860f034723cefc3609d8a", "content_id": "8949f7477b24b0c0031b8c7f86f58abed380f46c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108800, "license_type": "permissive", "max_line_length": 224, "num_lines": 2197, "path": "/snashall2019.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n#\r\n# BSE: The Bristol Stock Exchange\r\n#\r\n# Version 1.3; July 21st, 2018.\r\n# Version 1.2; November 17th, 2012.\r\n#\r\n# Copyright (c) 2012-2018, Dave Cliff\r\n#\r\n#\r\n# ------------------------\r\n#\r\n# MIT Open-Source License:\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\r\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\r\n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\r\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all 
copies or substantial\r\n# portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\r\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\r\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n#\r\n# ------------------------\r\n#\r\n#\r\n#\r\n# BSE is a very simple simulation of automated execution traders\r\n# operating on a very simple model of a limit order book (LOB) exchange\r\n#\r\n# major simplifications in this version:\r\n# (a) only one financial instrument being traded\r\n# (b) traders can only trade contracts of size 1 (will add variable quantities later)\r\n# (c) each trader can have max of one order per single orderbook.\r\n# (d) traders can replace/overwrite earlier orders, and/or can cancel\r\n# (d) simply processes each order in sequence and republishes LOB to all traders\r\n# => no issues with exchange processing latency/delays or simultaneously issued orders.\r\n#\r\n# NB this code has been written to be readable/intelligible, not efficient!\r\n\r\n# could import pylab here for graphing etc\r\n\r\nimport sys\r\nimport math\r\nimport random\r\n\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 1000 # maximum price in the system, in cents/pennies\r\nticksize = 1 # minimum change in price, in cents/pennies\r\n\r\n\r\n\r\n# an Order/quote has a trader id, a type (buy/sell) price, quantity, timestamp, and unique i.d.\r\nclass Order:\r\n\r\n def __init__(self, tid, otype, price, qty, time, qid):\r\n self.tid = tid # trader i.d.\r\n self.otype = otype # order type\r\n self.price = price # price\r\n self.qty = qty # quantity\r\n self.time = time # 
timestamp\r\n self.qid = qid # quote i.d. (unique to each quote)\r\n\r\n def __str__(self):\r\n return '[%s %s P=%03d Q=%s T=%5.2f QID:%d]' % \\\r\n (self.tid, self.otype, self.price, self.qty, self.time, self.qid)\r\n\r\n\r\n\r\n# Orderbook_half is one side of the book: a list of bids or a list of asks, each sorted best-first\r\n\r\nclass Orderbook_half:\r\n\r\n def __init__(self, booktype, worstprice):\r\n # booktype: bids or asks?\r\n self.booktype = booktype\r\n # dictionary of orders received, indexed by Trader ID\r\n self.orders = {}\r\n # limit order book, dictionary indexed by price, with order info\r\n self.lob = {}\r\n # anonymized LOB, lists, with only price/qty info\r\n self.lob_anon = []\r\n # summary stats\r\n self.best_price = None\r\n self.best_tid = None\r\n self.worstprice = worstprice\r\n self.n_orders = 0 # how many orders?\r\n self.lob_depth = 0 # how many different prices on lob?\r\n\r\n\r\n def anonymize_lob(self):\r\n # anonymize a lob, strip out order details, format as a sorted list\r\n # NB for asks, the sorting should be reversed\r\n self.lob_anon = []\r\n for price in sorted(self.lob):\r\n qty = self.lob[price][0]\r\n self.lob_anon.append([price, qty])\r\n\r\n\r\n def build_lob(self):\r\n lob_verbose = False\r\n # take a list of orders and build a limit-order-book (lob) from it\r\n # NB the exchange needs to know arrival times and trader-id associated with each order\r\n # returns lob as a dictionary (i.e., unsorted)\r\n # also builds anonymized version (just price/quantity, sorted, as a list) for publishing to traders\r\n self.lob = {}\r\n for tid in self.orders:\r\n order = self.orders.get(tid)\r\n price = order.price\r\n if price in self.lob:\r\n # update existing entry\r\n qty = self.lob[price][0]\r\n orderlist = self.lob[price][1]\r\n orderlist.append([order.time, order.qty, order.tid, order.qid])\r\n self.lob[price] = [qty + order.qty, orderlist]\r\n else:\r\n # create a new dictionary entry\r\n self.lob[price] = [order.qty, 
[[order.time, order.qty, order.tid, order.qid]]]\r\n # create anonymized version\r\n self.anonymize_lob()\r\n # record best price and associated trader-id\r\n if len(self.lob) > 0 :\r\n if self.booktype == 'Bid':\r\n self.best_price = self.lob_anon[-1][0]\r\n else :\r\n self.best_price = self.lob_anon[0][0]\r\n self.best_tid = self.lob[self.best_price][1][0][2]\r\n else :\r\n self.best_price = None\r\n self.best_tid = None\r\n\r\n if lob_verbose : print self.lob\r\n\r\n\r\n def book_add(self, order):\r\n # add order to the dictionary holding the list of orders\r\n # either overwrites old order from this trader\r\n # or dynamically creates new entry in the dictionary\r\n # so, max of one order per trader per list\r\n # checks whether length or order list has changed, to distinguish addition/overwrite\r\n #print('book_add > %s %s' % (order, self.orders))\r\n n_orders = self.n_orders\r\n self.orders[order.tid] = order\r\n self.n_orders = len(self.orders)\r\n self.build_lob()\r\n #print('book_add < %s %s' % (order, self.orders))\r\n if n_orders != self.n_orders :\r\n return('Addition')\r\n else:\r\n return('Overwrite')\r\n\r\n\r\n\r\n def book_del(self, order):\r\n # delete order from the dictionary holding the orders\r\n # assumes max of one order per trader per list\r\n # checks that the Trader ID does actually exist in the dict before deletion\r\n # print('book_del %s',self.orders)\r\n if self.orders.get(order.tid) != None :\r\n del(self.orders[order.tid])\r\n self.n_orders = len(self.orders)\r\n self.build_lob()\r\n # print('book_del %s', self.orders)\r\n\r\n\r\n def delete_best(self):\r\n # delete order: when the best bid/ask has been hit, delete it from the book\r\n # the TraderID of the deleted order is return-value, as counterparty to the trade\r\n best_price_orders = self.lob[self.best_price]\r\n best_price_qty = best_price_orders[0]\r\n best_price_counterparty = best_price_orders[1][0][2]\r\n if best_price_qty == 1:\r\n # here the order deletes the best 
price\r\n del(self.lob[self.best_price])\r\n del(self.orders[best_price_counterparty])\r\n self.n_orders = self.n_orders - 1\r\n if self.n_orders > 0:\r\n if self.booktype == 'Bid':\r\n self.best_price = max(self.lob.keys())\r\n else:\r\n self.best_price = min(self.lob.keys())\r\n self.lob_depth = len(self.lob.keys())\r\n else:\r\n self.best_price = self.worstprice\r\n self.lob_depth = 0\r\n else:\r\n # best_bid_qty>1 so the order decrements the quantity of the best bid\r\n # update the lob with the decremented order data\r\n self.lob[self.best_price] = [best_price_qty - 1, best_price_orders[1][1:]]\r\n\r\n # update the bid list: counterparty's bid has been deleted\r\n del(self.orders[best_price_counterparty])\r\n self.n_orders = self.n_orders - 1\r\n self.build_lob()\r\n return best_price_counterparty\r\n\r\n\r\n\r\n# Orderbook for a single instrument: list of bids and list of asks\r\n\r\nclass Orderbook(Orderbook_half):\r\n\r\n def __init__(self):\r\n self.bids = Orderbook_half('Bid', bse_sys_minprice)\r\n self.asks = Orderbook_half('Ask', bse_sys_maxprice)\r\n self.tape = []\r\n self.quote_id = 0 #unique ID code for each quote accepted onto the book\r\n\r\n\r\n\r\n# Exchange's internal orderbook\r\n\r\nclass Exchange(Orderbook):\r\n\r\n def add_order(self, order, verbose):\r\n # add a quote/order to the exchange and update all internal records; return unique i.d.\r\n order.qid = self.quote_id\r\n self.quote_id = order.qid + 1\r\n # if verbose : print('QUID: order.quid=%d self.quote.id=%d' % (order.qid, self.quote_id))\r\n tid = order.tid\r\n if order.otype == 'Bid':\r\n response=self.bids.book_add(order)\r\n best_price = self.bids.lob_anon[-1][0]\r\n self.bids.best_price = best_price\r\n self.bids.best_tid = self.bids.lob[best_price][1][0][2]\r\n else:\r\n response=self.asks.book_add(order)\r\n best_price = self.asks.lob_anon[0][0]\r\n self.asks.best_price = best_price\r\n self.asks.best_tid = self.asks.lob[best_price][1][0][2]\r\n return [order.qid, 
response]\r\n\r\n\r\n def del_order(self, time, order, verbose):\r\n # delete a trader's quot/order from the exchange, update all internal records\r\n tid = order.tid\r\n if order.otype == 'Bid':\r\n self.bids.book_del(order)\r\n if self.bids.n_orders > 0 :\r\n best_price = self.bids.lob_anon[-1][0]\r\n self.bids.best_price = best_price\r\n self.bids.best_tid = self.bids.lob[best_price][1][0][2]\r\n else: # this side of book is empty\r\n self.bids.best_price = None\r\n self.bids.best_tid = None\r\n cancel_record = { 'type': 'Cancel', 'time': time, 'order': order }\r\n self.tape.append(cancel_record)\r\n\r\n elif order.otype == 'Ask':\r\n self.asks.book_del(order)\r\n if self.asks.n_orders > 0 :\r\n best_price = self.asks.lob_anon[0][0]\r\n self.asks.best_price = best_price\r\n self.asks.best_tid = self.asks.lob[best_price][1][0][2]\r\n else: # this side of book is empty\r\n self.asks.best_price = None\r\n self.asks.best_tid = None\r\n cancel_record = { 'type': 'Cancel', 'time': time, 'order': order }\r\n self.tape.append(cancel_record)\r\n else:\r\n # neither bid nor ask?\r\n sys.exit('bad order type in del_quote()')\r\n\r\n\r\n\r\n def process_order2(self, time, order, verbose):\r\n # receive an order and either add it to the relevant LOB (ie treat as limit order)\r\n # or if it crosses the best counterparty offer, execute it (treat as a market order)\r\n oprice = order.price\r\n counterparty = None\r\n [qid, response] = self.add_order(order, verbose) # add it to the order lists -- overwriting any previous order\r\n order.qid = qid\r\n if verbose :\r\n print('QUID: order.quid=%d' % order.qid)\r\n print('RESPONSE: %s' % response)\r\n best_ask = self.asks.best_price\r\n best_ask_tid = self.asks.best_tid\r\n best_bid = self.bids.best_price\r\n best_bid_tid = self.bids.best_tid\r\n if order.otype == 'Bid':\r\n if self.asks.n_orders > 0 and best_bid >= best_ask:\r\n # bid lifts the best ask\r\n if verbose: print(\"Bid $%s lifts best ask\" % oprice)\r\n counterparty = 
best_ask_tid\r\n price = best_ask # bid crossed ask, so use ask price\r\n if verbose: print('counterparty, price', counterparty, price)\r\n # delete the ask just crossed\r\n self.asks.delete_best()\r\n # delete the bid that was the latest order\r\n self.bids.delete_best()\r\n elif order.otype == 'Ask':\r\n if self.bids.n_orders > 0 and best_ask <= best_bid:\r\n # ask hits the best bid\r\n if verbose: print(\"Ask $%s hits best bid\" % oprice)\r\n # remove the best bid\r\n counterparty = best_bid_tid\r\n price = best_bid # ask crossed bid, so use bid price\r\n if verbose: print('counterparty, price', counterparty, price)\r\n # delete the bid just crossed, from the exchange's records\r\n self.bids.delete_best()\r\n # delete the ask that was the latest order, from the exchange's records\r\n self.asks.delete_best()\r\n else:\r\n # we should never get here\r\n sys.exit('process_order() given neither Bid nor Ask')\r\n # NB at this point we have deleted the order from the exchange's records\r\n # but the two traders concerned still have to be notified\r\n if verbose: print('counterparty %s' % counterparty)\r\n if counterparty != None:\r\n # process the trade\r\n if verbose: print('>>>>>>>>>>>>>>>>>TRADE t=%5.2f $%d %s %s' % (time, price, counterparty, order.tid))\r\n transaction_record = { 'type': 'Trade',\r\n 'time': time,\r\n 'price': price,\r\n 'party1':counterparty,\r\n 'party2':order.tid,\r\n 'qty': order.qty\r\n }\r\n self.tape.append(transaction_record)\r\n return transaction_record\r\n else:\r\n return None\r\n\r\n\r\n\r\n def tape_dump(self, fname, fmode, tmode):\r\n dumpfile = open(fname, fmode)\r\n for tapeitem in self.tape:\r\n if tapeitem['type'] == 'Trade' :\r\n dumpfile.write('%s, %s\\n' % (tapeitem['time'], tapeitem['price']))\r\n dumpfile.close()\r\n if tmode == 'wipe':\r\n self.tape = []\r\n\r\n\r\n # this returns the LOB data \"published\" by the exchange,\r\n # i.e., what is accessible to the traders\r\n def publish_lob(self, time, verbose):\r\n 
public_data = {}\r\n public_data['time'] = time\r\n public_data['bids'] = {'best':self.bids.best_price,\r\n 'worst':self.bids.worstprice,\r\n 'n': self.bids.n_orders,\r\n 'lob':self.bids.lob_anon}\r\n public_data['asks'] = {'best':self.asks.best_price,\r\n 'worst':self.asks.worstprice,\r\n 'n': self.asks.n_orders,\r\n 'lob':self.asks.lob_anon}\r\n public_data['QID'] = self.quote_id\r\n public_data['tape'] = self.tape\r\n if verbose:\r\n print('publish_lob: t=%d' % time)\r\n print('BID_lob=%s' % public_data['bids']['lob'])\r\n # print('best=%s; worst=%s; n=%s ' % (self.bids.best_price, self.bids.worstprice, self.bids.n_orders))\r\n print('ASK_lob=%s' % public_data['asks']['lob'])\r\n # print('qid=%d' % self.quote_id)\r\n\r\n return public_data\r\n\r\n\r\n\r\n\r\n\r\n\r\n##################--Traders below here--#############\r\n\r\n\r\n# Trader superclass\r\n# all Traders have a trader id, bank balance, blotter, and list of orders to execute\r\nclass Trader:\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n self.ttype = ttype # what type / strategy this trader is\r\n self.tid = tid # trader unique ID code\r\n self.balance = balance # money in the bank\r\n self.blotter = [] # record of trades executed\r\n self.orders = [] # customer orders currently being worked (fixed at 1)\r\n self.n_quotes = 0 # number of quotes live on LOB\r\n self.willing = 1 # used in ZIP etc\r\n self.able = 1 # used in ZIP etc\r\n self.birthtime = time # used when calculating age of a trader/strategy\r\n self.profitpertime = 0 # profit per unit time\r\n self.n_trades = 0 # how many trades has this trader done?\r\n self.lastquote = None # record of what its last quote was\r\n\r\n\r\n def __str__(self):\r\n return '[TID %s type %s balance %s blotter %s orders %s n_trades %s profitpertime %s]' \\\r\n % (self.tid, self.ttype, self.balance, self.blotter, self.orders, self.n_trades, self.profitpertime)\r\n\r\n\r\n def add_order(self, order, verbose):\r\n # in this version, trader has at most 
one order,\r\n # if allow more than one, this needs to be self.orders.append(order)\r\n if self.n_quotes > 0 :\r\n # this trader has a live quote on the LOB, from a previous customer order\r\n # need response to signal cancellation/withdrawal of that quote\r\n response = 'LOB_Cancel'\r\n else:\r\n response = 'Proceed'\r\n self.orders = [order]\r\n if verbose : print('add_order < response=%s' % response)\r\n return response\r\n\r\n\r\n def del_order(self, order):\r\n # this is lazy: assumes each trader has only one customer order with quantity=1, so deleting sole order\r\n # CHANGE TO DELETE THE HEAD OF THE LIST AND KEEP THE TAIL\r\n self.orders = []\r\n\r\n\r\n def bookkeep(self, trade, order, verbose, time):\r\n\r\n outstr=\"\"\r\n for order in self.orders: outstr = outstr + str(order)\r\n\r\n self.blotter.append(trade) # add trade record to trader's blotter\r\n # NB What follows is **LAZY** -- assumes all orders are quantity=1\r\n transactionprice = trade['price']\r\n if self.orders[0].otype == 'Bid':\r\n profit = self.orders[0].price - transactionprice\r\n else:\r\n profit = transactionprice - self.orders[0].price\r\n self.balance += profit\r\n self.n_trades += 1\r\n self.profitpertime = self.balance/(time - self.birthtime)\r\n\r\n if profit < 0 :\r\n print profit\r\n print trade\r\n print order\r\n sys.exit()\r\n\r\n if verbose: print('%s profit=%d balance=%d profit/time=%d' % (outstr, profit, self.balance, self.profitpertime))\r\n self.del_order(order) # delete the order\r\n\r\n\r\n # specify how trader responds to events in the market\r\n # this is a null action, expect it to be overloaded by specific algos\r\n def respond(self, time, lob, trade, verbose):\r\n return None\r\n\r\n # specify how trader mutates its parameter values\r\n # this is a null action, expect it to be overloaded by specific algos\r\n def mutate(self, time, lob, trade, verbose):\r\n return None\r\n\r\n\r\n\r\n# Trader subclass Giveaway\r\n# even dumber than a ZI-U: just give the deal 
away\r\n# (but never makes a loss)\r\nclass Trader_Giveaway(Trader):\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n order = None\r\n else:\r\n quoteprice = self.orders[0].price\r\n order = Order(self.tid,\r\n self.orders[0].otype,\r\n quoteprice,\r\n self.orders[0].qty,\r\n time, lob['QID'])\r\n self.lastquote=order\r\n return order\r\n\r\n# Trader subclass AA\r\nclass Trader_AA(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n # Stuff about trader\r\n self.ttype = ttype\r\n self.tid = tid\r\n self.balance = balance\r\n self.birthtime = time\r\n self.profitpertime = 0\r\n self.n_trades = 0\r\n self.blotter = []\r\n self.orders = []\r\n self.n_quotes = 0\r\n self.lastquote = None\r\n\r\n self.limit = None\r\n self.job = None\r\n\r\n # learning variables\r\n self.r_shout_change_relative = 0.05\r\n self.r_shout_change_absolute = 0.05\r\n self.short_term_learning_rate = random.uniform(0.1, 0.5)\r\n self.long_term_learning_rate = random.uniform(0.1, 0.5)\r\n self.moving_average_weight_decay = 0.95 # how fast weight decays with time, lower is quicker, 0.9 in vytelingum\r\n self.moving_average_window_size = 5\r\n self.offer_change_rate = 3.0\r\n self.theta = -2.0\r\n self.theta_max = 2.0\r\n self.theta_min = -8.0\r\n self.marketMax = bse_sys_maxprice\r\n\r\n # Variables to describe the market\r\n self.previous_transactions = []\r\n self.moving_average_weights = []\r\n for i in range(self.moving_average_window_size):\r\n self.moving_average_weights.append(self.moving_average_weight_decay**i)\r\n self.estimated_equilibrium = []\r\n self.smiths_alpha = []\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n # Trading Variables\r\n self.r_shout = None\r\n self.buy_target = None\r\n self.sell_target = None\r\n self.buy_r = -1.0 * (0.3 * random.random())\r\n self.sell_r = -1.0 * (0.3 * random.random())\r\n\r\n\r\n\r\n def calcEq(self):\r\n # 
Slightly modified from paper, it is unclear inpaper\r\n # N previous transactions * weights / N in vytelingum, swap N denominator for sum of weights to be correct?\r\n if len(self.previous_transactions) == 0:\r\n return\r\n elif len(self.previous_transactions) < self.moving_average_window_size:\r\n # Not enough transactions\r\n self.estimated_equilibrium.append(float(sum(self.previous_transactions)) / max(len(self.previous_transactions), 1))\r\n else:\r\n N_previous_transactions = self.previous_transactions[-self.moving_average_window_size:]\r\n thing = [N_previous_transactions[i]*self.moving_average_weights[i] for i in range(self.moving_average_window_size)]\r\n eq = sum( thing ) / sum(self.moving_average_weights)\r\n self.estimated_equilibrium.append(eq)\r\n\r\n def calcAlpha(self):\r\n alpha = 0.0\r\n for p in self.estimated_equilibrium:\r\n alpha += (p - self.estimated_equilibrium[-1])**2\r\n alpha = math.sqrt(alpha/len(self.estimated_equilibrium))\r\n self.smiths_alpha.append( alpha/self.estimated_equilibrium[-1] )\r\n\r\n def calcTheta(self):\r\n gamma = 2.0 #not sensitive apparently so choose to be whatever\r\n # necessary for intialisation, div by 0\r\n if min(self.smiths_alpha) == max(self.smiths_alpha):\r\n alpha_range = 0.4 #starting value i guess\r\n else:\r\n alpha_range = (self.smiths_alpha[-1] - min(self.smiths_alpha)) / (max(self.smiths_alpha) - min(self.smiths_alpha))\r\n theta_range = self.theta_max - self.theta_min\r\n desired_theta = self.theta_min + (theta_range) * (1 - (alpha_range * math.exp(gamma * (alpha_range - 1))))\r\n self.theta = self.theta + self.long_term_learning_rate * (desired_theta - self.theta)\r\n\r\n def calcRshout(self):\r\n p = self.estimated_equilibrium[-1]\r\n l = self.limit\r\n theta = self.theta\r\n if self.job == 'Bid':\r\n # Currently a buyer\r\n if l <= p: #extramarginal!\r\n self.r_shout = 0.0\r\n else: #intramarginal :(\r\n if self.buy_target > self.estimated_equilibrium[-1]:\r\n #r[0,1]\r\n self.r_shout = 
math.log(((self.buy_target - p) * (math.exp(theta) - 1) / (l - p)) + 1) / theta\r\n else:\r\n #r[-1,0]\r\n self.r_shout = math.log((1 - (self.buy_target/p)) * (math.exp(theta) - 1) + 1) / theta\r\n\r\n\r\n if self.job == 'Ask':\r\n # Currently a seller\r\n if l >= p: #extramarginal!\r\n self.r_shout = 0\r\n else: #intramarginal :(\r\n if self.sell_target > self.estimated_equilibrium[-1]:\r\n # r[-1,0]\r\n self.r_shout = math.log((self.sell_target - p) * (math.exp(theta) - 1) / (self.marketMax - p) + 1) / theta\r\n else:\r\n # r[0,1]\r\n a = (self.sell_target-l)/(p-l)\r\n self.r_shout = (math.log((1 - a) * (math.exp(theta) - 1) + 1)) / theta\r\n\r\n def calcAgg(self):\r\n delta = 0\r\n if self.job == 'Bid':\r\n # BUYER\r\n if self.buy_target >= self.previous_transactions[-1] :\r\n # must be more aggressive\r\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\r\n else :\r\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\r\n\r\n self.buy_r = self.buy_r + self.short_term_learning_rate * (delta - self.buy_r)\r\n\r\n if self.job == 'Ask':\r\n # SELLER\r\n if self.sell_target > self.previous_transactions[-1] :\r\n delta = (1+self.r_shout_change_relative)*self.r_shout + self.r_shout_change_absolute\r\n else :\r\n delta = (1-self.r_shout_change_relative)*self.r_shout - self.r_shout_change_absolute\r\n\r\n self.sell_r = self.sell_r + self.short_term_learning_rate * (delta - self.sell_r)\r\n\r\n def calcTarget(self):\r\n if len(self.estimated_equilibrium) > 0:\r\n p = self.estimated_equilibrium[-1]\r\n if self.limit == p:\r\n p = p * 1.000001 # to prevent theta_bar = 0\r\n elif self.job == 'Bid':\r\n p = self.limit - self.limit * 0.2 ## Initial guess for eq if no deals yet!!....\r\n elif self.job == 'Ask':\r\n p = self.limit + self.limit * 0.2\r\n l = self.limit\r\n theta = self.theta\r\n if self.job == 'Bid':\r\n #BUYER\r\n minus_thing = (math.exp(-self.buy_r * theta) - 1) / (math.exp(theta) - 1)\r\n 
plus_thing = (math.exp(self.buy_r * theta) - 1) / (math.exp(theta) - 1)\r\n theta_bar = (theta * l - theta * p) / p\r\n if theta_bar == 0:\r\n theta_bar = 0.0001\r\n if math.exp(theta_bar) - 1 == 0:\r\n theta_bar = 0.0001\r\n bar_thing = (math.exp(-self.buy_r * theta_bar) - 1) / (math.exp(theta_bar) - 1)\r\n if l <= p: #Extramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = l\r\n else:\r\n self.buy_target = l * (1 - minus_thing)\r\n else: #intramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = p + (l-p)*plus_thing\r\n else:\r\n self.buy_target = p*(1-bar_thing)\r\n if self.buy_target > l:\r\n self.buy_target = l\r\n\r\n if self.job == 'Ask':\r\n #SELLER\r\n minus_thing = (math.exp(-self.sell_r * theta) - 1) / (math.exp(theta) - 1)\r\n plus_thing = (math.exp(self.sell_r * theta) - 1) / (math.exp(theta) - 1)\r\n theta_bar = (theta * l - theta * p) / p\r\n if theta_bar == 0:\r\n theta_bar = 0.0001\r\n if math.exp(theta_bar) - 1 == 0:\r\n theta_bar = 0.0001\r\n bar_thing = (math.exp(-self.sell_r * theta_bar) - 1) / (math.exp(theta_bar) - 1) #div 0 sometimes what!?\r\n if l <= p: #Extramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = l\r\n else:\r\n self.buy_target = l + (self.marketMax - l)*(minus_thing)\r\n else: #intramarginal\r\n if self.buy_r >= 0:\r\n self.buy_target = l + (p-l)*(1-plus_thing)\r\n else:\r\n self.buy_target = p + (self.marketMax - p)*(bar_thing)\r\n if self.sell_target < l:\r\n self.sell_target = l\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n return None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].otype\r\n self.calcTarget()\r\n\r\n if self.prev_best_bid_p == None:\r\n o_bid = 0\r\n else:\r\n o_bid = self.prev_best_bid_p\r\n if self.prev_best_ask_p == None:\r\n o_ask = self.marketMax\r\n else:\r\n o_ask = self.prev_best_ask_p\r\n\r\n if self.job == 'Bid': #BUYER\r\n if self.limit <= o_bid:\r\n return None\r\n 
else:\r\n if len(self.previous_transactions) > 0: ## has been at least one transaction\r\n o_ask_plus = (1+self.r_shout_change_relative)*o_ask + self.r_shout_change_absolute\r\n quoteprice = o_bid + ((min(self.limit, o_ask_plus) - o_bid) / self.offer_change_rate)\r\n else:\r\n if o_ask <= self.buy_target:\r\n quoteprice = o_ask\r\n else:\r\n quoteprice = o_bid + ((self.buy_target - o_bid) / self.offer_change_rate)\r\n if self.job == 'Ask':\r\n if self.limit >= o_ask:\r\n return None\r\n else:\r\n if len(self.previous_transactions) > 0: ## has been at least one transaction\r\n o_bid_minus = (1-self.r_shout_change_relative) * o_bid - self.r_shout_change_absolute\r\n quoteprice = o_ask - ((o_ask - max(self.limit, o_bid_minus)) / self.offer_change_rate)\r\n else:\r\n if o_bid >= self.sell_target:\r\n quoteprice = o_bid\r\n else:\r\n quoteprice = o_ask - ((o_ask - self.sell_target) / self.offer_change_rate)\r\n\r\n\r\n order = Order(self.tid,\r\n self.orders[0].otype,\r\n quoteprice,\r\n self.orders[0].qty,\r\n time, lob['QID'])\r\n self.lastquote=order\r\n return order\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n ## Begin nicked from ZIP\r\n\r\n # what, if anything, has happened on the bid LOB? 
Nicked from ZIP..\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['best']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['best']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n deal = 
bid_hit or ask_lifted\r\n\r\n ## End nicked from ZIP\r\n\r\n if deal:\r\n self.previous_transactions.append(trade['price'])\r\n if self.sell_target == None:\r\n self.sell_target = trade['price']\r\n if self.buy_target == None:\r\n self.buy_target = trade['price']\r\n self.calcEq()\r\n self.calcAlpha()\r\n self.calcTheta()\r\n self.calcRshout()\r\n self.calcAgg()\r\n self.calcTarget()\r\n #print 'sell: ', self.sell_target, 'buy: ', self.buy_target, 'limit:', self.limit, 'eq: ', self.estimated_equilibrium[-1], 'sell_r: ', self.sell_r, 'buy_r: ', self.buy_r, '\\n'\r\n\r\n\r\n\r\n# Trader subclass ZI-C\r\n# After Gode & Sunder 1993\r\nclass Trader_ZIC(Trader):\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n # no orders: return NULL\r\n order = None\r\n else:\r\n minprice = lob['bids']['worst']\r\n maxprice = lob['asks']['worst']\r\n qid = lob['QID']\r\n limit = self.orders[0].price\r\n otype = self.orders[0].otype\r\n if otype == 'Bid':\r\n quoteprice = random.randint(minprice, limit)\r\n else:\r\n quoteprice = random.randint(limit, maxprice)\r\n # NB should check it == 'Ask' and barf if not\r\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, qid)\r\n self.lastquote = order\r\n return order\r\n\r\n\r\n# Trader subclass Shaver\r\n# shaves a penny off the best price\r\n# if there is no best price, creates \"stub quote\" at system max/min\r\nclass Trader_Shaver(Trader):\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n order = None\r\n else:\r\n limitprice = self.orders[0].price\r\n otype = self.orders[0].otype\r\n if otype == 'Bid':\r\n if lob['bids']['n'] > 0:\r\n quoteprice = lob['bids']['best'] + 1\r\n if quoteprice > limitprice :\r\n quoteprice = limitprice\r\n else:\r\n quoteprice = lob['bids']['worst']\r\n else:\r\n if lob['asks']['n'] > 0:\r\n quoteprice = lob['asks']['best'] - 1\r\n if quoteprice < limitprice:\r\n quoteprice = limitprice\r\n else:\r\n quoteprice = 
lob['asks']['worst']\r\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, lob['QID'])\r\n self.lastquote = order\r\n return order\r\n\r\n\r\n# Trader subclass Sniper\r\n# Based on Shaver,\r\n# \"lurks\" until time remaining < threshold% of the trading session\r\n# then gets increasing aggressive, increasing \"shave thickness\" as time runs out\r\nclass Trader_Sniper(Trader):\r\n\r\n def getorder(self, time, countdown, lob):\r\n lurk_threshold = 0.2\r\n shavegrowthrate = 3\r\n shave = int(1.0 / (0.01 + countdown / (shavegrowthrate * lurk_threshold)))\r\n if (len(self.orders) < 1) or (countdown > lurk_threshold):\r\n order = None\r\n else:\r\n limitprice = self.orders[0].price\r\n otype = self.orders[0].otype\r\n\r\n if otype == 'Bid':\r\n if lob['bids']['n'] > 0:\r\n quoteprice = lob['bids']['best'] + shave\r\n if quoteprice > limitprice :\r\n quoteprice = limitprice\r\n else:\r\n quoteprice = lob['bids']['worst']\r\n else:\r\n if lob['asks']['n'] > 0:\r\n quoteprice = lob['asks']['best'] - shave\r\n if quoteprice < limitprice:\r\n quoteprice = limitprice\r\n else:\r\n quoteprice = lob['asks']['worst']\r\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, lob['QID'])\r\n self.lastquote = order\r\n return order\r\n\r\n\r\n\r\n\r\n# Trader subclass ZIP\r\n# After Cliff 1997\r\nclass Trader_ASAD(Trader):\r\n\r\n # ZIP init key param-values are those used in Cliff's 1997 original HP Labs tech report\r\n # NB this implementation keeps separate margin values for buying & selling,\r\n # so a single trader can both buy AND sell\r\n # -- in the original, traders were either buyers OR sellers\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n self.ttype = ttype\r\n self.tid = tid\r\n self.balance = balance\r\n self.birthtime = time\r\n self.profitpertime = 0\r\n self.n_trades = 0\r\n self.blotter = []\r\n self.orders = []\r\n self.prev_orders = []\r\n self.n_quotes = 0\r\n self.lastquote = None\r\n self.job = None # this gets 
switched to 'Bid' or 'Ask' depending on order-type\r\n self.active = False # gets switched to True while actively working an order\r\n self.prev_change = 0 # this was called last_d in Cliff'97\r\n self.beta = 0.1 + 0.4 * random.random()\r\n self.momntm = 0.1 * random.random()\r\n self.ca = 0.05 # self.ca & .cr were hard-coded in '97 but parameterised later\r\n self.cr = 0.05\r\n self.margin = None # this was called profit in Cliff'97\r\n self.margin_buy = -1.0 * (0.05 + 0.3 * random.random())\r\n self.margin_sell = 0.05 + 0.3 * random.random()\r\n self.price = None\r\n self.limit = None\r\n self.phi = 0 #measure of market shock for ASAD\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].otype\r\n if self.job == 'Bid':\r\n # currently a buyer (working a bid order)\r\n self.margin = self.margin_buy\r\n else:\r\n # currently a seller (working a sell order)\r\n self.margin = self.margin_sell\r\n quoteprice = int(self.limit * (1 + self.margin))\r\n self.price = quoteprice\r\n\r\n order = Order(self.tid, self.job, quoteprice, self.orders[0].qty, time, lob['QID'])\r\n self.lastquote = order\r\n self.prev_orders.append(order)\r\n return order\r\n\r\n\r\n # update margin on basis of what happened in market\r\n def respond(self, time, lob, trade, verbose):\r\n # ZIP trader responds to market events, altering its margin\r\n # does this whether it currently has an order to work or not\r\n\r\n def target_up(price):\r\n # generate a higher target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 + (self.cr * 
random.random())) # relative shift\r\n target = int(round(ptrb_rel + ptrb_abs, 0))\r\n# # print('TargetUp: %d %d\\n' % (price,target))\r\n return(target)\r\n\r\n\r\n def target_down(price):\r\n # generate a lower target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 - (self.cr * random.random())) # relative shift\r\n target = int(round(ptrb_rel - ptrb_abs, 0))\r\n# # print('TargetDn: %d %d\\n' % (price,target))\r\n return(target)\r\n\r\n\r\n def willing_to_trade(price):\r\n # am I willing to trade at this price?\r\n willing = False\r\n if self.job == 'Bid' and self.active and self.price >= price:\r\n willing = True\r\n if self.job == 'Ask' and self.active and self.price <= price:\r\n willing = True\r\n return willing\r\n\r\n\r\n def profit_alter(price):\r\n oldprice = self.price\r\n diff = price - oldprice\r\n change = ((1.0 - self.momntm) * (self.beta * diff)) + (self.momntm * self.prev_change)\r\n self.prev_change = change\r\n newmargin = ((self.price + change) / self.limit) - 1.0\r\n\r\n if self.job == 'Bid':\r\n if newmargin < 0.0 :\r\n self.margin_buy = newmargin\r\n self.margin = newmargin\r\n else :\r\n if newmargin > 0.0 :\r\n self.margin_sell = newmargin\r\n self.margin = newmargin\r\n\r\n # set the price from limit and profit-margin\r\n self.price = int(round(self.limit * (1.0 + self.margin), 0))\r\n# # print('old=%d diff=%d change=%d price = %d\\n' % (oldprice, diff, change, self.price))\r\n\r\n def calc_phi():\r\n if len(self.prev_orders) < 20:\r\n return\r\n sumxy = 0\r\n sumx = 0\r\n sumxsq = 0\r\n sumy = 0\r\n for i in range(20):\r\n sumxy = sumxy + i * self.prev_orders[-20:][i].price\r\n sumx = sumx + i\r\n sumxsq = sumxsq + i*i\r\n sumy = sumy + self.prev_orders[-20:][i].price\r\n delta = (sumxy - (sumy * sumx / 20)) / (sumxsq - (sumx * sumx / 20))\r\n if delta < 0:\r\n self.phi = -math.log(1-delta)\r\n else:\r\n self.phi = math.log(1+delta)\r\n\r\n # what, if anything, 
has happened on the bid LOB?\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['best']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['best']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n\r\n if verbose and (bid_improved or bid_hit or ask_improved or ask_lifted):\r\n print ('B_improved', bid_improved, 'B_hit', bid_hit, 'A_improved', ask_improved, 
'A_lifted', ask_lifted)\r\n\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n calc_phi()\r\n\r\n\r\n if self.job == 'Ask':\r\n # seller\r\n if deal :\r\n tradeprice = trade['price']\r\n if self.price <= tradeprice:\r\n # could sell for more? raise margin\r\n target_price = target_up(tradeprice)\r\n if self.phi > 1:\r\n target_price = target_up(target_price)\r\n profit_alter(target_price)\r\n elif ask_lifted and self.active and not willing_to_trade(tradeprice):\r\n # wouldnt have got this deal, still working order, so reduce margin\r\n target_price = target_down(tradeprice)\r\n if self.phi > 1:\r\n target_price = target_up(target_price)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for a target price higher than best bid\r\n if ask_improved and self.price > lob_best_ask_p:\r\n if lob_best_bid_p != None:\r\n target_price = target_up(lob_best_bid_p)\r\n else:\r\n target_price = lob['asks']['worst'] # stub quote\r\n profit_alter(target_price)\r\n\r\n if self.job == 'Bid':\r\n # buyer\r\n if deal :\r\n tradeprice = trade['price']\r\n if self.price >= tradeprice:\r\n # could buy for less? raise margin (i.e. 
cut the price)\r\n target_price = target_down(tradeprice)\r\n if self.phi < -1:\r\n target_price = target_up(target_price)\r\n profit_alter(target_price)\r\n elif bid_hit and self.active and not willing_to_trade(tradeprice):\r\n # wouldnt have got this deal, still working order, so reduce margin\r\n target_price = target_up(tradeprice)\r\n if self.phi < -1:\r\n target_price = target_up(target_price)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for target price lower than best ask\r\n if bid_improved and self.price < lob_best_bid_p:\r\n if lob_best_ask_p != None:\r\n target_price = target_down(lob_best_ask_p)\r\n else:\r\n target_price = lob['bids']['worst'] # stub quote\r\n profit_alter(target_price)\r\n\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n\r\n# Trader subclass ZIP\r\n# After Cliff 1997\r\nclass Trader_GDX(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n self.ttype = ttype\r\n self.tid = tid\r\n self.balance = balance\r\n self.birthtime = time\r\n self.profitpertime = 0\r\n self.n_trades = 0\r\n self.blotter = []\r\n self.orders = []\r\n self.prev_orders = []\r\n self.n_quotes = 0\r\n self.lastquote = None\r\n self.job = None # this gets switched to 'Bid' or 'Ask' depending on order-type\r\n self.active = False # gets switched to True while actively working an order\r\n\r\n #memory of all bids and asks and accepted bids and asks\r\n self.outstanding_bids = []\r\n self.outstanding_asks = []\r\n self.accepted_asks = []\r\n self.accepted_bids = []\r\n\r\n self.price = -1\r\n\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n self.first_turn = True\r\n\r\n self.gamma = 
0.1\r\n\r\n self.holdings = 10\r\n self.remaining_offer_ops = 10\r\n self.values = [[0 for n in range(self.remaining_offer_ops)] for m in range(self.holdings)]\r\n\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].otype\r\n\r\n #calculate price\r\n if self.job == 'Bid':\r\n self.price = self.calc_p_bid(self.holdings - 1, self.remaining_offer_ops - 1)\r\n if self.job == 'Ask':\r\n self.price = self.calc_p_ask(self.holdings - 1, self.remaining_offer_ops - 1)\r\n\r\n order = Order(self.tid, self.job, self.price, self.orders[0].qty, time, lob['QID'])\r\n self.lastquote = order\r\n\r\n if self.first_turn or self.price == -1:\r\n return None\r\n return order\r\n\r\n def calc_p_bid(self, m, n):\r\n best_return = 0\r\n best_bid = 0\r\n second_best_return = 0\r\n second_best_bid = 0\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n thing = self.belief_buy(i) * ((self.limit - i) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_bid = best_bid\r\n second_best_return = best_return\r\n best_return = thing\r\n best_bid = i\r\n\r\n #always best bid largest one\r\n if second_best_bid > best_bid:\r\n a = second_best_bid\r\n second_best_bid = best_bid\r\n best_bid = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_bid), int(best_bid))]:\r\n thing = self.belief_buy(i + second_best_bid) * ((self.limit - (i + second_best_bid)) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i + second_best_bid) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_bid = i + second_best_bid\r\n\r\n return best_bid\r\n\r\n def calc_p_ask(self, m, n):\r\n best_return = 0\r\n best_ask = self.limit\r\n 
second_best_return = 0\r\n second_best_ask = self.limit\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n j = i + self.limit\r\n thing = self.belief_sell(j) * ((j - self.limit) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(j) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_ask = best_ask\r\n second_best_return = best_return\r\n best_return = thing\r\n best_ask = j\r\n #always best ask largest one\r\n if second_best_ask > best_ask:\r\n a = second_best_ask\r\n second_best_ask = best_ask\r\n best_ask = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_ask), int(best_ask))]:\r\n thing = self.belief_sell(i + second_best_ask) * (((i + second_best_ask) - self.limit) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(i + second_best_ask) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_ask = i + second_best_ask\r\n\r\n return best_ask\r\n\r\n def belief_sell(self, price):\r\n accepted_asks_greater = 0\r\n bids_greater = 0\r\n unaccepted_asks_lower = 0\r\n for p in self.accepted_asks:\r\n if p >= price:\r\n accepted_asks_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_bids]:\r\n if p >= price:\r\n bids_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n unaccepted_asks_lower += 1\r\n\r\n if accepted_asks_greater + bids_greater + unaccepted_asks_lower == 0:\r\n return 0\r\n return (accepted_asks_greater + bids_greater) / (accepted_asks_greater + bids_greater + unaccepted_asks_lower)\r\n\r\n def belief_buy(self, price):\r\n accepted_bids_lower = 0\r\n asks_lower = 0\r\n unaccepted_bids_greater = 0\r\n for p in self.accepted_bids:\r\n if p <= price:\r\n accepted_bids_lower += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n asks_lower += 1\r\n for p in [thing[0] for thing in 
self.outstanding_bids]:\r\n if p >= price:\r\n unaccepted_bids_greater += 1\r\n if accepted_bids_lower + asks_lower + unaccepted_bids_greater == 0:\r\n return 0\r\n return (accepted_bids_lower + asks_lower) / (accepted_bids_lower + asks_lower + unaccepted_bids_greater)\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n # what, if anything, has happened on the bid LOB?\r\n self.outstanding_bids = lob['bids']['lob']\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['best']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n self.accepted_bids.append(self.prev_best_bid_p)\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n self.outstanding_asks = lob['asks']['lob']\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['best']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity 
reduced -- assume previous best ask was lifted\r\n self.accepted_asks.append(self.prev_best_ask_p)\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n\r\n #populate expected values\r\n if self.first_turn:\r\n print \"populating\"\r\n self.first_turn = False\r\n for n in range(1, self.remaining_offer_ops):\r\n for m in range(1, self.holdings):\r\n if self.job == 'Bid':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_bid(m, n)\r\n\r\n if self.job == 'Ask':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_ask(m, n)\r\n print \"done\"\r\n\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n\r\n\r\n\r\n# Trader subclass ZIP\r\n# After Cliff 1997\r\nclass Trader_ZIP(Trader):\r\n\r\n # ZIP init key param-values are those used in Cliff's 1997 original HP Labs tech report\r\n # NB this implementation keeps separate margin values for buying & selling,\r\n # so a single trader can both buy AND sell\r\n # -- in the original, traders were either buyers OR sellers\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n self.ttype = ttype\r\n self.tid = tid\r\n self.balance = balance\r\n self.birthtime = time\r\n self.profitpertime = 0\r\n self.n_trades = 0\r\n self.blotter = []\r\n self.orders = []\r\n self.n_quotes = 0\r\n self.lastquote = None\r\n self.job = None # this gets switched to 'Bid' or 'Ask' depending on order-type\r\n self.active = False # gets switched to True while actively working an order\r\n self.prev_change = 0 # this was called last_d in Cliff'97\r\n self.beta = 0.1 + 0.4 * random.random()\r\n self.momntm = 0.1 * 
random.random()\r\n self.ca = 0.05 # self.ca & .cr were hard-coded in '97 but parameterised later\r\n self.cr = 0.05\r\n self.margin = None # this was called profit in Cliff'97\r\n self.margin_buy = -1.0 * (0.05 + 0.3 * random.random())\r\n self.margin_sell = 0.05 + 0.3 * random.random()\r\n self.price = None\r\n self.limit = None\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n\r\n def getorder(self, time, countdown, lob):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].otype\r\n if self.job == 'Bid':\r\n # currently a buyer (working a bid order)\r\n self.margin = self.margin_buy\r\n else:\r\n # currently a seller (working a sell order)\r\n self.margin = self.margin_sell\r\n quoteprice = int(self.limit * (1 + self.margin))\r\n self.price = quoteprice\r\n\r\n order = Order(self.tid, self.job, quoteprice, self.orders[0].qty, time, lob['QID'])\r\n self.lastquote = order\r\n return order\r\n\r\n\r\n # update margin on basis of what happened in market\r\n def respond(self, time, lob, trade, verbose):\r\n # ZIP trader responds to market events, altering its margin\r\n # does this whether it currently has an order to work or not\r\n\r\n def target_up(price):\r\n # generate a higher target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 + (self.cr * random.random())) # relative shift\r\n target = int(round(ptrb_rel + ptrb_abs, 0))\r\n# # print('TargetUp: %d %d\\n' % (price,target))\r\n return(target)\r\n\r\n\r\n def target_down(price):\r\n # generate a lower target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 - (self.cr * 
random.random())) # relative shift\r\n target = int(round(ptrb_rel - ptrb_abs, 0))\r\n# # print('TargetDn: %d %d\\n' % (price,target))\r\n return(target)\r\n\r\n\r\n def willing_to_trade(price):\r\n # am I willing to trade at this price?\r\n willing = False\r\n if self.job == 'Bid' and self.active and self.price >= price:\r\n willing = True\r\n if self.job == 'Ask' and self.active and self.price <= price:\r\n willing = True\r\n return willing\r\n\r\n\r\n def profit_alter(price):\r\n oldprice = self.price\r\n diff = price - oldprice\r\n change = ((1.0 - self.momntm) * (self.beta * diff)) + (self.momntm * self.prev_change)\r\n self.prev_change = change\r\n newmargin = ((self.price + change) / self.limit) - 1.0\r\n\r\n if self.job == 'Bid':\r\n if newmargin < 0.0 :\r\n self.margin_buy = newmargin\r\n self.margin = newmargin\r\n else :\r\n if newmargin > 0.0 :\r\n self.margin_sell = newmargin\r\n self.margin = newmargin\r\n\r\n # set the price from limit and profit-margin\r\n self.price = int(round(self.limit * (1.0 + self.margin), 0))\r\n# # print('old=%d diff=%d change=%d price = %d\\n' % (oldprice, diff, change, self.price))\r\n\r\n\r\n # what, if anything, has happened on the bid LOB?\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['best']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n 
bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['best']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n\r\n if verbose and (bid_improved or bid_hit or ask_improved or ask_lifted):\r\n print ('B_improved', bid_improved, 'B_hit', bid_hit, 'A_improved', ask_improved, 'A_lifted', ask_lifted)\r\n\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n if self.job == 'Ask':\r\n # seller\r\n if deal :\r\n tradeprice = trade['price']\r\n if self.price <= tradeprice:\r\n # could sell for more? 
raise margin\r\n target_price = target_up(tradeprice)\r\n profit_alter(target_price)\r\n elif ask_lifted and self.active and not willing_to_trade(tradeprice):\r\n # wouldnt have got this deal, still working order, so reduce margin\r\n target_price = target_down(tradeprice)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for a target price higher than best bid\r\n if ask_improved and self.price > lob_best_ask_p:\r\n if lob_best_bid_p != None:\r\n target_price = target_up(lob_best_bid_p)\r\n else:\r\n target_price = lob['asks']['worst'] # stub quote\r\n profit_alter(target_price)\r\n\r\n if self.job == 'Bid':\r\n # buyer\r\n if deal :\r\n tradeprice = trade['price']\r\n if self.price >= tradeprice:\r\n # could buy for less? raise margin (i.e. cut the price)\r\n target_price = target_down(tradeprice)\r\n profit_alter(target_price)\r\n elif bid_hit and self.active and not willing_to_trade(tradeprice):\r\n # wouldnt have got this deal, still working order, so reduce margin\r\n target_price = target_up(tradeprice)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for target price lower than best ask\r\n if bid_improved and self.price < lob_best_bid_p:\r\n if lob_best_ask_p != None:\r\n target_price = target_down(lob_best_ask_p)\r\n else:\r\n target_price = lob['bids']['worst'] # stub quote\r\n profit_alter(target_price)\r\n\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n\r\n\r\n\r\n##########################---trader-types have all been defined now--################\r\n\r\n\r\n\r\n\r\n##########################---Below lies the experiment/test-rig---##################\r\n\r\n\r\n\r\n# trade_stats()\r\n# dump CSV statistics on exchange data and trader population to file for later analysis\r\n# this makes no assumptions about the number of types of traders, or\r\n# 
the number of traders of any one type -- allows either/both to change\r\n# between successive calls, but that does make it inefficient as it has to\r\n# re-analyse the entire set of traders on each call\r\ndef trade_stats(expid, traders, dumpfile, time, lob):\r\n trader_types = {}\r\n n_traders = len(traders)\r\n for t in traders:\r\n ttype = traders[t].ttype\r\n if ttype in trader_types.keys():\r\n t_balance = trader_types[ttype]['balance_sum'] + traders[t].balance\r\n n = trader_types[ttype]['n'] + 1\r\n else:\r\n t_balance = traders[t].balance\r\n n = 1\r\n trader_types[ttype] = {'n':n, 'balance_sum':t_balance}\r\n\r\n\r\n dumpfile.write('%s, %06d, ' % (expid, time))\r\n printing_column = 0\r\n for ttype in sorted(list(trader_types.keys())):\r\n n = trader_types[ttype]['n']\r\n #to keep the traders in the same columns, make data easier\r\n if (ttype == 'AA'):\r\n s = trader_types[ttype]['balance_sum']\r\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\r\n printing_column = 1\r\n\r\n if (ttype == 'ASAD'):\r\n if (printing_column == 0):\r\n dumpfile.write('%s, %s, %s, %s, ' % ('', '', '', ''))\r\n s = trader_types[ttype]['balance_sum']\r\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\r\n printing_column = 2\r\n\r\n if (ttype == 'GDX'):\r\n for i in range(2 - printing_column):\r\n dumpfile.write('%s, %s, %s, %s, ' % ('', '', '', ''))\r\n s = trader_types[ttype]['balance_sum']\r\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\r\n printing_column = 3\r\n\r\n if (ttype == 'ZIP'):\r\n for i in range(3 - printing_column):\r\n dumpfile.write('%s, %s, %s, %s, ' % ('', '', '', ''))\r\n s = trader_types[ttype]['balance_sum']\r\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\r\n printing_column = 4\r\n while printing_column < 4:\r\n dumpfile.write('%s, %s, %s, %s, ' % ('', '', '', ''))\r\n printing_column += 1\r\n\r\n\r\n if lob['bids']['best'] != None :\r\n dumpfile.write('%d, ' % 
(lob['bids']['best']))\r\n else:\r\n dumpfile.write('N, ')\r\n if lob['asks']['best'] != None :\r\n dumpfile.write('%d, ' % (lob['asks']['best']))\r\n else:\r\n dumpfile.write('N, ')\r\n dumpfile.write('\\n');\r\n\r\n\r\n\r\n\r\n\r\n# create a bunch of traders from traders_spec\r\n# returns tuple (n_buyers, n_sellers)\r\n# optionally shuffles the pack of buyers and the pack of sellers\r\ndef populate_market(traders_spec, traders, shuffle, verbose):\r\n\r\n def trader_type(robottype, name):\r\n if robottype == 'AA':\r\n return Trader_AA('AA', name, 0.00, 0)\r\n elif robottype == 'ZIC':\r\n return Trader_ZIC('ZIC', name, 0.00, 0)\r\n elif robottype == 'GDX':\r\n return Trader_GDX('GDX', name, 0.00, 0)\r\n elif robottype == 'SNPR':\r\n return Trader_Sniper('SNPR', name, 0.00, 0)\r\n elif robottype == 'ZIP':\r\n return Trader_ZIP('ZIP', name, 0.00, 0)\r\n elif robottype == 'ASAD':\r\n return Trader_ASAD('ASAD', name, 0.00, 0)\r\n else:\r\n sys.exit('FATAL: don\\'t know robot type %s\\n' % robottype)\r\n\r\n\r\n def shuffle_traders(ttype_char, n, traders):\r\n for swap in range(n):\r\n t1 = (n - 1) - swap\r\n t2 = random.randint(0, t1)\r\n t1name = '%c%02d' % (ttype_char, t1)\r\n t2name = '%c%02d' % (ttype_char, t2)\r\n traders[t1name].tid = t2name\r\n traders[t2name].tid = t1name\r\n temp = traders[t1name]\r\n traders[t1name] = traders[t2name]\r\n traders[t2name] = temp\r\n\r\n\r\n n_buyers = 0\r\n for bs in traders_spec['buyers']:\r\n ttype = bs[0]\r\n for b in range(bs[1]):\r\n tname = 'B%02d' % n_buyers # buyer i.d. string\r\n traders[tname] = trader_type(ttype, tname)\r\n n_buyers = n_buyers + 1\r\n\r\n if n_buyers < 1:\r\n sys.exit('FATAL: no buyers specified\\n')\r\n\r\n if shuffle: shuffle_traders('B', n_buyers, traders)\r\n\r\n\r\n n_sellers = 0\r\n for ss in traders_spec['sellers']:\r\n ttype = ss[0]\r\n for s in range(ss[1]):\r\n tname = 'S%02d' % n_sellers # buyer i.d. 
string\r\n traders[tname] = trader_type(ttype, tname)\r\n n_sellers = n_sellers + 1\r\n\r\n if n_sellers < 1:\r\n sys.exit('FATAL: no sellers specified\\n')\r\n\r\n if shuffle: shuffle_traders('S', n_sellers, traders)\r\n\r\n if verbose :\r\n for t in range(n_buyers):\r\n bname = 'B%02d' % t\r\n print(traders[bname])\r\n for t in range(n_sellers):\r\n bname = 'S%02d' % t\r\n print(traders[bname])\r\n\r\n\r\n return {'n_buyers':n_buyers, 'n_sellers':n_sellers}\r\n\r\n\r\n\r\n# customer_orders(): allocate orders to traders\r\n# parameter \"os\" is order schedule\r\n# os['timemode'] is either 'periodic', 'drip-fixed', 'drip-jitter', or 'drip-poisson'\r\n# os['interval'] is number of seconds for a full cycle of replenishment\r\n# drip-poisson sequences will be normalised to ensure time of last replenishment <= interval\r\n# parameter \"pending\" is the list of future orders (if this is empty, generates a new one from os)\r\n# revised \"pending\" is the returned value\r\n#\r\n# also returns a list of \"cancellations\": trader-ids for those traders who are now working a new order and hence\r\n# need to kill quotes already on LOB from working previous order\r\n#\r\n#\r\n# if a supply or demand schedule mode is \"random\" and more than one range is supplied in ranges[],\r\n# then each time a price is generated one of the ranges is chosen equiprobably and\r\n# the price is then generated uniform-randomly from that range\r\n#\r\n# if len(range)==2, interpreted as min and max values on the schedule, specifying linear supply/demand curve\r\n# if len(range)==3, first two vals are min & max, third value should be a function that generates a dynamic price offset\r\n# -- the offset value applies equally to the min & max, so gradient of linear sup/dem curve doesn't vary\r\n# if len(range)==4, the third value is function that gives dynamic offset for schedule min,\r\n# and fourth is a function giving dynamic offset for schedule max, so gradient of sup/dem linear curve can 
vary\r\n#\r\n# the interface on this is a bit of a mess... could do with refactoring\r\n\r\n\r\ndef customer_orders(time, last_update, traders, trader_stats, os, pending, verbose):\r\n\r\n\r\n def sysmin_check(price):\r\n if price < bse_sys_minprice:\r\n print('WARNING: price < bse_sys_min -- clipped')\r\n price = bse_sys_minprice\r\n return price\r\n\r\n\r\n def sysmax_check(price):\r\n if price > bse_sys_maxprice:\r\n print('WARNING: price > bse_sys_max -- clipped')\r\n price = bse_sys_maxprice\r\n return price\r\n\r\n\r\n\r\n def getorderprice(i, sched, n, mode, issuetime):\r\n # does the first schedule range include optional dynamic offset function(s)?\r\n if len(sched[0]) > 2:\r\n offsetfn = sched[0][2]\r\n if callable(offsetfn):\r\n # same offset for min and max\r\n offset_min = offsetfn(issuetime)\r\n offset_max = offset_min\r\n else:\r\n sys.exit('FAIL: 3rd argument of sched in getorderprice() not callable')\r\n if len(sched[0]) > 3:\r\n # if second offset function is specfied, that applies only to the max value\r\n offsetfn = sched[0][3]\r\n if callable(offsetfn):\r\n # this function applies to max\r\n offset_max = offsetfn(issuetime)\r\n else:\r\n sys.exit('FAIL: 4th argument of sched in getorderprice() not callable')\r\n else:\r\n offset_min = 0.0\r\n offset_max = 0.0\r\n\r\n pmin = sysmin_check(offset_min + min(sched[0][0], sched[0][1]))\r\n pmax = sysmax_check(offset_max + max(sched[0][0], sched[0][1]))\r\n prange = pmax - pmin\r\n stepsize = prange / (n - 1)\r\n halfstep = round(stepsize / 2.0)\r\n\r\n if mode == 'fixed':\r\n orderprice = pmin + int(i * stepsize)\r\n elif mode == 'jittered':\r\n orderprice = pmin + int(i * stepsize) + random.randint(-halfstep, halfstep)\r\n elif mode == 'random':\r\n if len(sched) > 1:\r\n # more than one schedule: choose one equiprobably\r\n s = random.randint(0, len(sched) - 1)\r\n pmin = sysmin_check(min(sched[s][0], sched[s][1]))\r\n pmax = sysmax_check(max(sched[s][0], sched[s][1]))\r\n orderprice = 
random.randint(pmin, pmax)\r\n else:\r\n sys.exit('FAIL: Unknown mode in schedule')\r\n orderprice = sysmin_check(sysmax_check(orderprice))\r\n return orderprice\r\n\r\n\r\n\r\n def getissuetimes(n_traders, mode, interval, shuffle, fittointerval):\r\n interval = float(interval)\r\n if n_traders < 1:\r\n sys.exit('FAIL: n_traders < 1 in getissuetime()')\r\n elif n_traders == 1:\r\n tstep = interval\r\n else:\r\n tstep = interval / (n_traders - 1)\r\n arrtime = 0\r\n issuetimes = []\r\n for t in range(n_traders):\r\n if mode == 'periodic':\r\n arrtime = interval\r\n elif mode == 'drip-fixed':\r\n arrtime = t * tstep\r\n elif mode == 'drip-jitter':\r\n arrtime = t * tstep + tstep * random.random()\r\n elif mode == 'drip-poisson':\r\n # poisson requires a bit of extra work\r\n interarrivaltime = random.expovariate(n_traders / interval)\r\n arrtime += interarrivaltime\r\n else:\r\n sys.exit('FAIL: unknown time-mode in getissuetimes()')\r\n issuetimes.append(arrtime)\r\n\r\n # at this point, arrtime is the last arrival time\r\n if fittointerval and ((arrtime > interval) or (arrtime < interval)):\r\n # generated sum of interarrival times longer than the interval\r\n # squish them back so that last arrival falls at t=interval\r\n for t in range(n_traders):\r\n issuetimes[t] = interval * (issuetimes[t] / arrtime)\r\n # optionally randomly shuffle the times\r\n if shuffle:\r\n for t in range(n_traders):\r\n i = (n_traders - 1) - t\r\n j = random.randint(0, i)\r\n tmp = issuetimes[i]\r\n issuetimes[i] = issuetimes[j]\r\n issuetimes[j] = tmp\r\n return issuetimes\r\n\r\n\r\n def getschedmode(time, os):\r\n got_one = False\r\n for sched in os:\r\n if (sched['from'] <= time) and (time < sched['to']) :\r\n # within the timezone for this schedule\r\n schedrange = sched['ranges']\r\n mode = sched['stepmode']\r\n got_one = True\r\n exit # jump out the loop -- so the first matching timezone has priority over any others\r\n if not got_one:\r\n sys.exit('Fail: time=%5.2f not within any 
timezone in os=%s' % (time, os))\r\n return (schedrange, mode)\r\n\r\n\r\n n_buyers = trader_stats['n_buyers']\r\n n_sellers = trader_stats['n_sellers']\r\n\r\n shuffle_times = False\r\n\r\n cancellations = []\r\n\r\n if len(pending) < 1:\r\n # list of pending (to-be-issued) customer orders is empty, so generate a new one\r\n new_pending = []\r\n\r\n # demand side (buyers)\r\n issuetimes = getissuetimes(n_buyers, os['timemode'], os['interval'], shuffle_times, True)\r\n\r\n ordertype = 'Bid'\r\n (sched, mode) = getschedmode(time, os['dem'])\r\n for t in range(n_buyers):\r\n issuetime = time + issuetimes[t]\r\n tname = 'B%02d' % t\r\n orderprice = getorderprice(t, sched, n_buyers, mode, issuetime)\r\n order = Order(tname, ordertype, orderprice, 1, issuetime, -3.14)\r\n new_pending.append(order)\r\n\r\n # supply side (sellers)\r\n issuetimes = getissuetimes(n_sellers, os['timemode'], os['interval'], shuffle_times, True)\r\n ordertype = 'Ask'\r\n (sched, mode) = getschedmode(time, os['sup'])\r\n for t in range(n_sellers):\r\n issuetime = time + issuetimes[t]\r\n tname = 'S%02d' % t\r\n orderprice = getorderprice(t, sched, n_sellers, mode, issuetime)\r\n order = Order(tname, ordertype, orderprice, 1, issuetime, -3.14)\r\n new_pending.append(order)\r\n else:\r\n # there are pending future orders: issue any whose timestamp is in the past\r\n new_pending = []\r\n for order in pending:\r\n if order.time < time:\r\n # this order should have been issued by now\r\n # issue it to the trader\r\n tname = order.tid\r\n response = traders[tname].add_order(order, verbose)\r\n if verbose: print('Customer order: %s %s' % (response, order) )\r\n if response == 'LOB_Cancel' :\r\n cancellations.append(tname)\r\n if verbose: print('Cancellations: %s' % (cancellations))\r\n # and then don't add it to new_pending (i.e., delete it)\r\n else:\r\n # this order stays on the pending list\r\n new_pending.append(order)\r\n return [new_pending, cancellations]\r\n\r\n\r\n\r\n# one session in the 
market\r\ndef market_session(sess_id, starttime, endtime, trader_spec, order_schedule, dumpfile, dump_each_trade, verbose):\r\n\r\n\r\n # initialise the exchange\r\n exchange = Exchange()\r\n\r\n\r\n # create a bunch of traders\r\n traders = {}\r\n trader_stats = populate_market(trader_spec, traders, True, verbose)\r\n\r\n\r\n # timestep set so that can process all traders in one second\r\n # NB minimum interarrival time of customer orders may be much less than this!!\r\n timestep = 1.0 / float(trader_stats['n_buyers'] + trader_stats['n_sellers'])\r\n\r\n duration = float(endtime - starttime)\r\n\r\n last_update = -1.0\r\n\r\n time = starttime\r\n\r\n orders_verbose = False\r\n lob_verbose = False\r\n process_verbose = False\r\n respond_verbose = False\r\n bookkeep_verbose = False\r\n\r\n pending_cust_orders = []\r\n\r\n if verbose: print('\\n%s; ' % (sess_id))\r\n\r\n\r\n while time < endtime:\r\n\r\n\r\n\r\n # how much time left, as a percentage?\r\n time_left = (endtime - time) / duration\r\n\r\n # if verbose: print('\\n\\n%s; t=%08.2f (%4.1f/100) ' % (sess_id, time, time_left*100))\r\n\r\n trade = None\r\n\r\n [pending_cust_orders, kills] = customer_orders(time, last_update, traders, trader_stats,\r\n order_schedule, pending_cust_orders, orders_verbose)\r\n\r\n # if any newly-issued customer orders mean quotes on the LOB need to be cancelled, kill them\r\n if len(kills) > 0 :\r\n # if verbose : print('Kills: %s' % (kills))\r\n for kill in kills :\r\n # if verbose : print('lastquote=%s' % traders[kill].lastquote)\r\n if traders[kill].lastquote != None :\r\n # if verbose : print('Killing order %s' % (str(traders[kill].lastquote)))\r\n exchange.del_order(time, traders[kill].lastquote, verbose)\r\n\r\n\r\n # get a limit-order quote (or None) from a randomly chosen trader\r\n tid = list(traders.keys())[random.randint(0, len(traders) - 1)]\r\n order = traders[tid].getorder(time, time_left, exchange.publish_lob(time, lob_verbose))\r\n\r\n # if verbose: print('Trader 
Quote: %s' % (order))\r\n\r\n if order != None:\r\n if order.otype == 'Ask' and order.price < traders[tid].orders[0].price: sys.exit('Bad ask')\r\n if order.otype == 'Bid' and order.price > traders[tid].orders[0].price: sys.exit('Bad bid')\r\n # send order to exchange\r\n traders[tid].n_quotes = 1\r\n trade = exchange.process_order2(time, order, process_verbose)\r\n if trade != None:\r\n # trade occurred,\r\n # so the counterparties update order lists and blotters\r\n traders[trade['party1']].bookkeep(trade, order, bookkeep_verbose, time)\r\n traders[trade['party2']].bookkeep(trade, order, bookkeep_verbose, time)\r\n if dump_each_trade: trade_stats(sess_id, traders, tdump, time, exchange.publish_lob(time, lob_verbose))\r\n\r\n # traders respond to whatever happened\r\n lob = exchange.publish_lob(time, lob_verbose)\r\n for t in traders:\r\n # NB respond just updates trader's internal variables\r\n # doesn't alter the LOB, so processing each trader in\r\n # sequence (rather than random/shuffle) isn't a problem\r\n traders[t].respond(time, lob, trade, respond_verbose)\r\n\r\n time = time + timestep\r\n\r\n\r\n # end of an experiment -- dump the tape\r\n exchange.tape_dump('transactions.csv', 'w', 'keep')\r\n\r\n\r\n # write trade_stats for this experiment NB end-of-session summary only\r\n trade_stats(sess_id, traders, tdump, time, exchange.publish_lob(time, lob_verbose))\r\n\r\n\r\n\r\n#############################\r\n\r\n# # Below here is where we set up and run a series of experiments\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # set up parameters for the session\r\n\r\n start_time = 0.0\r\n end_time = 330.0\r\n duration = end_time - start_time\r\n\r\n\r\n # schedule_offsetfn returns time-dependent offset on schedule prices\r\n #def schedule_offsetfn(t):\r\n # pi2 = math.pi * 2\r\n # c = math.pi * 3000\r\n # wavelength = t / c\r\n # gradient = 100 * t / (c / pi2)\r\n # amplitude = 100 * t / (c / pi2)\r\n # offset = gradient + amplitude * math.sin(wavelength * 
t)\r\n # print int(round(offset, 0))\r\n # \treturn int(round(offset, 0))\r\n\r\n def schedule_offsetfn(t):\r\n return int((t % 75)/2)\r\n\r\n # def schedule_offsetfn(t):\r\n # return int(math.sin(t/30))\r\n #\r\n # def schedule_offsetfn(t):\r\n # if (t % 100 < 50):\r\n # return 5\r\n # else:\r\n # return -5\r\n\r\n\r\n# # range1 = (10, 190, schedule_offsetfn)\r\n# # range2 = (200,300, schedule_offsetfn)\r\n\r\n# # supply_schedule = [ {'from':start_time, 'to':duration/3, 'ranges':[range1], 'stepmode':'fixed'},\r\n# # {'from':duration/3, 'to':2*duration/3, 'ranges':[range2], 'stepmode':'fixed'},\r\n# # {'from':2*duration/3, 'to':end_time, 'ranges':[range1], 'stepmode':'fixed'}\r\n# # ]\r\n\r\n\r\n\r\n range_supply1 = (10,50)\r\n range_supply2 = (25,35)\r\n range_supply3 = (10,50)\r\n range_supply4 = (20,60)\r\n supply_schedule = [ {'from':start_time, 'to':end_time, 'ranges':[range_supply1], 'stepmode':'fixed'},\r\n #{'from':180, 'to':330, 'ranges':[range_supply2], 'stepmode':'fixed'},\r\n #{'from':330, 'to':480, 'ranges':[range_supply3], 'stepmode':'fixed'},\r\n #{'from':330, 'to':end_time, 'ranges':[range_supply3], 'stepmode':'fixed'},\r\n ]\r\n\r\n range_demand1 = (10,50)\r\n range_demand2 = (10,50)\r\n range_demand3 = (25,35)\r\n range_demand4 = (20,60)\r\n demand_schedule = [ {'from':start_time, 'to':end_time, 'ranges':[range_demand1], 'stepmode':'fixed'},\r\n #{'from':180, 'to':330, 'ranges':[range_demand2], 'stepmode':'fixed'},\r\n #{'from':330, 'to':480, 'ranges':[range_demand3], 'stepmode':'fixed'},\r\n #{'from':330, 'to':end_time, 'ranges':[range_demand3], 'stepmode':'fixed'},\r\n ]\r\n\r\n order_sched = {'sup':supply_schedule, 'dem':demand_schedule,\r\n 'interval':30, 'timemode':'periodic'}\r\n\r\n # buyers_spec = [('AA',2),('SHVR',10),('ZIC',10),('ZIP',10)]\r\n # sellers_spec = buyers_spec\r\n # traders_spec = {'sellers':sellers_spec, 'buyers':buyers_spec}\r\n #\r\n # # run a sequence of trials, one session per trial\r\n #\r\n # n_trials = 10\r\n # 
tdump=open('avg_balance.csv','w')\r\n # trial = 1\r\n # if n_trials > 1:\r\n # dump_all = False\r\n # else:\r\n # dump_all = True\r\n #\r\n # while (trial<(n_trials+1)):\r\n # trial_id = 'trial%04d' % trial\r\n # market_session(trial_id, start_time, end_time, traders_spec, order_sched, tdump, False, True)\r\n # tdump.flush()\r\n # trial = trial + 1\r\n # tdump.close()\r\n #\r\n # sys.exit('Done Now')\r\n\r\n\r\n\r\n\r\n # run a sequence of trials that exhaustively varies the ratio of four trader types\r\n # NB this has weakness of symmetric proportions on buyers/sellers -- combinatorics of varying that are quite nasty\r\n\r\n\r\n n_trader_types = 4\r\n equal_ratio_n = 4\r\n n_trials_per_ratio = 15\r\n\r\n n_traders = n_trader_types * equal_ratio_n\r\n\r\n fname = '15_balances_withZIP_M1_periodic.csv'\r\n\r\n tdump = open(fname, 'w')\r\n\r\n min_n = 0\r\n\r\n trialnumber = 1\r\n\r\n tdump.write('%s, %s, ' % ('expid', 'time'))\r\n for f in range(4):\r\n tdump.write('%s, %s, %s, %s, ' % ('type', 'balance', 'number of traders', 'profit per trader'))\r\n tdump.write('\\n');\r\n\r\n # buyers_spec = [('GDX', 11), ('ZIP', 11)]\r\n # sellers_spec = buyers_spec\r\n # traders_spec = {'sellers':sellers_spec, 'buyers':buyers_spec}\r\n # print buyers_spec\r\n # trial = 1\r\n # while trial <= n_trials_per_ratio:\r\n # trial_id = 'trial%07d' % trialnumber\r\n # market_session(trial_id, start_time, end_time, traders_spec,\r\n # order_sched, tdump, False, True)\r\n # tdump.flush()\r\n # trial = trial + 1\r\n # trialnumber = trialnumber + 1\r\n\r\n trdr_1_n = min_n\r\n while trdr_1_n <= n_traders:\r\n trdr_2_n = min_n\r\n while trdr_2_n <= n_traders - trdr_1_n:\r\n trdr_3_n = min_n\r\n while trdr_3_n <= n_traders - (trdr_1_n + trdr_2_n):\r\n trdr_4_n = n_traders - (trdr_1_n + trdr_2_n + trdr_3_n)\r\n if trdr_4_n >= min_n:\r\n buyers_spec = [('AA', trdr_1_n), ('GDX', trdr_2_n),\r\n ('ASAD', trdr_3_n), ('ZIP', trdr_4_n)]\r\n sellers_spec = buyers_spec\r\n traders_spec = 
{'sellers':sellers_spec, 'buyers':buyers_spec}\r\n # print buyers_spec\r\n trial = 1\r\n while trial <= n_trials_per_ratio:\r\n trial_id = 'trial%07d' % trialnumber\r\n market_session(trial_id, start_time, end_time, traders_spec,\r\n order_sched, tdump, False, True)\r\n tdump.flush()\r\n trial = trial + 1\r\n trialnumber = trialnumber + 1\r\n trdr_3_n += 1\r\n trdr_2_n += 1\r\n trdr_1_n += 1\r\n tdump.close()\r\n\r\n print trialnumber\r\n" }, { "alpha_fraction": 0.4063865542411804, "alphanum_fraction": 0.43036413192749023, "avg_line_length": 31.411985397338867, "blob_id": "83255a01a0a3020b80ebbc21cbba341192caed15", "content_id": "6efa53adefd7205865bc0bc2a036cf0fee335bf4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8925, "license_type": "permissive", "max_line_length": 181, "num_lines": 267, "path": "/ZhenZhang/source/Simple_MLOFI.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "from BSE2_msg_classes import Assignment, Order, Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\nclass Trader_Simple_MLOFI(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time):\r\n\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n\r\n self.limit = None\r\n self.job = None\r\n\r\n\r\n # variable for MLOFI\r\n self.last_lob = None;\r\n self.list_OFI = [];\r\n self.list_D = [];\r\n\r\n def cal_level_n_e(self, current_lob, n):\r\n b_n = 0\r\n r_n = 0\r\n a_n = 0\r\n q_n = 0\r\n\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n\r\n if (len(current_lob['bids']['lob']) < n):\r\n b_n = 0\r\n r_n = 0\r\n else:\r\n b_n = current_lob['bids']['lob'][n - 1][0]\r\n r_n = current_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['bids']['lob']) < n):\r\n b_n_1 = 0\r\n r_n_1 = 
0\r\n else:\r\n b_n_1 = self.last_lob['bids']['lob'][n - 1][0]\r\n r_n_1 = self.last_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(current_lob['asks']['lob']) < n):\r\n a_n = 0\r\n q_n = 0\r\n else:\r\n a_n = current_lob['asks']['lob'][n - 1][0]\r\n q_n = current_lob['asks']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['asks']['lob']) < n):\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n else:\r\n a_n_1 = self.last_lob['asks']['lob'][n - 1][0]\r\n q_n_1 = self.last_lob['asks']['lob'][n - 1][1]\r\n\r\n delta_w = 0;\r\n\r\n if (b_n > b_n_1):\r\n delta_w = r_n\r\n elif (b_n == b_n_1):\r\n delta_w = r_n - r_n_1\r\n else:\r\n delta_w = -r_n_1\r\n\r\n delta_v = 0\r\n if (a_n > a_n_1):\r\n delta_v = -q_n_1\r\n elif (a_n == a_n_1):\r\n delta_v = q_n - q_n_1\r\n else:\r\n delta_v = q_n\r\n\r\n return delta_w - delta_v\r\n\r\n def cal_e(self, time, lob, trade, verbose):\r\n\r\n level_1 = self.cal_level_n_e(lob, 1)\r\n level_2 = self.cal_level_n_e(lob, 2)\r\n level_3 = self.cal_level_n_e(lob, 3)\r\n e = {\r\n 'level1': level_1,\r\n 'level2': level_2,\r\n 'level3': level_3,\r\n }\r\n # print 'ofi is:'\r\n # print str(e)\r\n self.list_OFI.append(e)\r\n\r\n def cal_depth(self, lob):\r\n level_1 = self.cal_depth_n(lob, 1)\r\n level_2 = self.cal_depth_n(lob, 2)\r\n level_3 = self.cal_depth_n(lob, 3)\r\n d = {\r\n 'level1': level_1,\r\n 'level2': level_2,\r\n 'level3': level_3,\r\n }\r\n # print 'depth is:'\r\n # print str(d)\r\n self.list_D.append(d);\r\n\r\n def cal_depth_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return (r_n + q_n) / 2\r\n\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n if len(self.orders) < 1:\r\n self.active = False\r\n return None\r\n else:\r\n self.limit = self.orders[0].price\r\n otype = self.orders[0].atype\r\n self.job = self.orders[0].atype\r\n ostyle = 
self.orders[0].astyle\r\n if otype == 'Bid':\r\n if lob['bids']['n'] > 0:\r\n quoteprice = lob['bids']['bestp']\r\n if quoteprice > self.limit:\r\n quoteprice = self.limit\r\n else:\r\n quoteprice = self.limit\r\n else:\r\n if lob['asks']['n'] > 0:\r\n quoteprice = lob['asks']['bestp']\r\n if quoteprice < self.limit:\r\n quoteprice = self.limit\r\n else:\r\n quoteprice = self.limit\r\n def imbalance_alter(quoteprice, lob):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n level_1_ofi_cul = 0\r\n level_2_ofi_cul = 0\r\n level_3_ofi_cul = 0\r\n\r\n n = 1\r\n while (len(self.list_OFI) >= n):\r\n level_1_ofi_cul = level_1_ofi_cul + self.list_OFI[-n]['level1']\r\n level_2_ofi_cul = level_2_ofi_cul + self.list_OFI[-n]['level2']\r\n level_3_ofi_cul = level_3_ofi_cul + self.list_OFI[-n]['level3']\r\n n = n + 1\r\n if (n >= 6): break\r\n\r\n level_1_depth_cul = 0;\r\n level_2_depth_cul = 0;\r\n level_3_depth_cul = 0;\r\n\r\n m = 1\r\n while (len(self.list_D) >= m):\r\n\r\n level_1_depth_cul = level_1_depth_cul + self.list_D[-m]['level1']\r\n level_2_depth_cul = level_2_depth_cul + self.list_D[-m]['level2']\r\n level_3_depth_cul = level_3_depth_cul + self.list_D[-m]['level3']\r\n m = m + 1\r\n if (m >= 4): break\r\n\r\n # if(level_1_depth_cul==0): level_1_depth_cul = 10000\r\n # if(level_2_depth_cul==0): level_2_depth_cul = 10000\r\n # if(level_3_depth_cul==0): level_3_depth_cul = 10000\r\n if m == 1:\r\n level_1_depth_averge = level_1_depth_cul + 1\r\n level_2_depth_averge = level_2_depth_cul + 1\r\n level_3_depth_averge = level_3_depth_cul + 1\r\n\r\n else:\r\n level_1_depth_averge = level_1_depth_cul / (m - 1) + 1\r\n level_2_depth_averge = level_2_depth_cul / (m - 1) + 1\r\n level_3_depth_averge = level_3_depth_cul / (m - 1) + 1\r\n c = 0.5\r\n decay = 0.8\r\n\r\n # print 'level_1_depth_averge is %s'%level_1_depth_averge\r\n # print 'level_2_depth_averge is %s'%level_2_depth_averge\r\n # print 'level_3_depth_averge is 
%s'%level_3_depth_averge\r\n offset = level_1_ofi_cul * c / level_1_depth_averge + decay * level_2_ofi_cul * c / level_2_depth_averge + decay * decay * level_3_ofi_cul * c / level_3_depth_averge\r\n\r\n # quoteprice_iaa = (quoteprice_aa+offset)*0.9 + 0.1*quoteprice_aa\r\n benchmark = quoteprice;\r\n if(lob['midprice'] != None):\r\n benchmark = lob['midprice']\r\n # print 'midprice is %d' % benchmark\r\n # print 'benchmark = %d' % benchmark\r\n quoteprice_isimple = benchmark + offset\r\n if self.job == 'Bid' and quoteprice_isimple > self.limit:\r\n quoteprice_isimple = self.limit\r\n if self.job == 'Ask' and quoteprice_isimple < self.limit:\r\n quoteprice_isimple = self.limit\r\n\r\n # print 'IAA_MLOFI original quotaprice: %d' % (quoteprice)\r\n # print 'offset is %d'%offset\r\n # print 'level1 ofi is %d'%level_1_ofi_cul\r\n # print 'level2 ofi is %d'%level_2_ofi_cul\r\n # print 'level3 ofi is %d'%level_3_ofi_cul\r\n # print 'level1 depth is %d'%level_1_depth_averge\r\n # print 'level2 depth is %d'%level_2_depth_averge\r\n # print 'level3 depth is %d'%level_3_depth_averge\r\n # print 'offset is %d'%offset\r\n # print 'IAA_MLOFI final quotaprice: %d' % (quoteprice_isimple)\r\n # print 'IAAB_MLOFI JOB IS %s' % self.job\r\n return quoteprice_isimple\r\n\r\n quoteprice_isimple = imbalance_alter(quoteprice, lob)\r\n\r\n order = Order(self.tid,\r\n self.orders[0].atype,\r\n 'LIM',\r\n quoteprice_isimple,\r\n self.orders[0].qty,\r\n time, None, -1)\r\n self.lastquote = order\r\n return order\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n\r\n ## End nicked from ZIP\r\n if (self.last_lob == None):\r\n self.last_lob = lob\r\n else:\r\n # print ''\r\n # print ''\r\n # print 'pre lob'\r\n # print 'bid anon:'\r\n # print self.last_lob['bids']['lob']\r\n # print 'ask anon:'\r\n # print self.last_lob['asks']['lob']\r\n # print 'current lob'\r\n # print 'bid anon:'\r\n # print lob['bids']['lob']\r\n # print 'ask anon:'\r\n # print lob['asks']['lob']\r\n\r\n 
self.cal_e(time, lob, trade, verbose)\r\n self.cal_depth(lob);\r\n self.last_lob = lob;\r\n\r\n\r\n" }, { "alpha_fraction": 0.7614718675613403, "alphanum_fraction": 0.78311687707901, "avg_line_length": 84.55555725097656, "blob_id": "10fb776316dea4fe589117e145234ab599db494f", "content_id": "599a9886a2901ab83ae5b2191e9390f4b0d8ca73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2310, "license_type": "permissive", "max_line_length": 382, "num_lines": 27, "path": "/README.md", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "<i>NB: in Q3 of 2023, a decade after BSE was first launched, we'll be making BSE2 available in a separate repo. BSE2 is a major refactoring and extension of the original BSE. This, the original BSE repo, will be retained for legacy and reference, and because the code is old and stable and simple; but for advanced usage BSE2 should be the preferred choice once it is available.</i>\n\nBSE, The Bristol Stock Exchange, is a simple minimal simulation of a limit-order-book financial exchange, developed for teaching. The aim is to let students explore writing automated trading strategies that deal with \"Level 2\" market data.\n\nIt is written in Python, is single-threaded and all in one file for ease of use by novices. The file BSEguide.pdf explains much of what is going on and includes an example programming assignment. The Wiki here on the BSE GitHub site holds a copy of the BSEguide text: it may be that the Wiki text is more up to date than the PDF file. \n\nThe code in BSE is based on a large number of simplifying assumptions, chief of which is absolute-zero latency: if a trader issues a new quote, that gets processed by the exchange and all other traders can react to it, in zero time (i.e., before any other quote is issued). 
\n\nNevertheless, because the BSE system is stochastic it can also be used to introduce issues in the design of experiments and analysis of empirical data.\n\nReal exchanges are much much more complicated than this. \n\nIf you use BSE in your work, please link back to this GitHub page for BSE so that people know where to find the original Python source-code: https://github.com/davecliff/BristolStockExchange, and please also cite the peer-reviewed paper that describes BSE:\n \nCliff, D. (2018). BSE: A Minimal Simulation of a Limit-Order-Book Stock Exchange. In M. Affenzeller, et al. (Eds.), Proceedings 30th European Modeling and Simulation Symposium (EMSS 2018), pp. 194-203. DIME University of Genoa.\n \nWhich you can download from here:\nhttps://research-information.bris.ac.uk/ws/portalfiles/portal/167944812/Cliff_i3M_CRC_formatted_repository.pdf\n\nor here:\nhttps://arxiv.org/abs/1809.06027\n\n\nThe code is open-sourced via the MIT Licence: see the LICENSE file for full text. \n(copied from http://opensource.org/licenses/mit-license.php)\n\nLast update: Dave Cliff, March 23rd 2021.\n" }, { "alpha_fraction": 0.534746527671814, "alphanum_fraction": 0.5444390773773193, "avg_line_length": 42.55682373046875, "blob_id": "01436344582664fe58b275b578464b6b3b7eb957", "content_id": "f51667579bd16e76feeb0eb6dc73a901f3b12193", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95434, "license_type": "permissive", "max_line_length": 129, "num_lines": 2191, "path": "/BSE.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#\n# BSE: The Bristol Stock Exchange\n#\n\n# Version 1.7; September 2022 added PRDE\n# Version 1.6; September 2021 added PRSH\n# Version 1.5; 02 Jan 2021 -- was meant to be the final version before switch to BSE2.x, but that didn't happen :-)\n# Version 1.4; 26 Oct 2020 -- change to Python 3.x\n# Version 1.3; July 21st, 2018 (Python 2.x)\n# 
Version 1.2; November 17th, 2012 (Python 2.x)\n#\n# Copyright (c) 2012-2022, Dave Cliff\n#\n#\n# ------------------------\n#\n# MIT Open-Source License:\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial\n# portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# ------------------------\n#\n#\n#\n# BSE is a very simple simulation of automated execution traders\n# operating on a very simple model of a limit order book (LOB) exchange\n#\n# major simplifications in this version:\n# (a) only one financial instrument being traded\n# (b) traders can only trade contracts of size 1 (will add variable quantities later)\n# (c) each trader can have max of one order per single orderbook.\n# (d) traders can replace/overwrite earlier orders, and/or can cancel\n# (d) simply processes each order in sequence and republishes LOB to all traders\n# => no issues with exchange processing latency/delays or simultaneously issued orders.\n#\n# NB this code has been written to be readable/intelligible, not efficient!\n\nimport sys\nimport 
math\nimport random\nimport time as chrono\n\n# a bunch of system constants (globals)\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\nbse_sys_maxprice = 500 # maximum price in the system, in cents/pennies\n# ticksize should be a param of an exchange (so different exchanges have different ticksizes)\n# todo: change this, so ticksize no longer global. \nticksize = 1 # minimum change in price, in cents/pennies\n\n\n# an Order/quote has a trader id, a type (buy/sell) price, quantity, timestamp, and unique i.d.\nclass Order:\n\n def __init__(self, tid, otype, price, qty, time, qid):\n self.tid = tid # trader i.d.\n self.otype = otype # order type\n self.price = price # price\n self.qty = qty # quantity\n self.time = time # timestamp\n self.qid = qid # quote i.d. (unique to each quote)\n\n def __str__(self):\n return '[%s %s P=%03d Q=%s T=%5.2f QID:%d]' % \\\n (self.tid, self.otype, self.price, self.qty, self.time, self.qid)\n\n\n# Orderbook_half is one side of the book: a list of bids or a list of asks, each sorted best-first\n\nclass Orderbook_half:\n\n def __init__(self, booktype, worstprice):\n # booktype: bids or asks?\n self.booktype = booktype\n # dictionary of orders received, indexed by Trader ID\n self.orders = {}\n # limit order book, dictionary indexed by price, with order info\n self.lob = {}\n # anonymized LOB, lists, with only price/qty info\n self.lob_anon = []\n # summary stats\n self.best_price = None\n self.best_tid = None\n self.worstprice = worstprice\n self.session_extreme = None # most extreme price quoted in this session\n self.n_orders = 0 # how many orders?\n self.lob_depth = 0 # how many different prices on lob?\n\n def anonymize_lob(self):\n # anonymize a lob, strip out order details, format as a sorted list\n # NB for asks, the sorting should be reversed\n self.lob_anon = []\n for price in sorted(self.lob):\n qty = self.lob[price][0]\n self.lob_anon.append([price, qty])\n\n def build_lob(self):\n lob_verbose = False\n # 
take a list of orders and build a limit-order-book (lob) from it\n # NB the exchange needs to know arrival times and trader-id associated with each order\n # returns lob as a dictionary (i.e., unsorted)\n # also builds anonymized version (just price/quantity, sorted, as a list) for publishing to traders\n self.lob = {}\n for tid in self.orders:\n order = self.orders.get(tid)\n price = order.price\n if price in self.lob:\n # update existing entry\n qty = self.lob[price][0]\n orderlist = self.lob[price][1]\n orderlist.append([order.time, order.qty, order.tid, order.qid])\n self.lob[price] = [qty + order.qty, orderlist]\n else:\n # create a new dictionary entry\n self.lob[price] = [order.qty, [[order.time, order.qty, order.tid, order.qid]]]\n # create anonymized version\n self.anonymize_lob()\n # record best price and associated trader-id\n if len(self.lob) > 0:\n if self.booktype == 'Bid':\n self.best_price = self.lob_anon[-1][0]\n else:\n self.best_price = self.lob_anon[0][0]\n self.best_tid = self.lob[self.best_price][1][0][2]\n else:\n self.best_price = None\n self.best_tid = None\n\n if lob_verbose:\n print(self.lob)\n\n def book_add(self, order):\n # add order to the dictionary holding the list of orders\n # either overwrites old order from this trader\n # or dynamically creates new entry in the dictionary\n # so, max of one order per trader per list\n # checks whether length or order list has changed, to distinguish addition/overwrite\n # print('book_add > %s %s' % (order, self.orders))\n\n # if this is an ask, does the price set a new extreme-high record?\n if (self.booktype == 'Ask') and ((self.session_extreme is None) or (order.price > self.session_extreme)):\n self.session_extreme = int(order.price)\n\n # add the order to the book\n n_orders = self.n_orders\n self.orders[order.tid] = order\n self.n_orders = len(self.orders)\n self.build_lob()\n # print('book_add < %s %s' % (order, self.orders))\n if n_orders != self.n_orders:\n return 'Addition'\n else:\n 
return 'Overwrite'\n\n def book_del(self, order):\n # delete order from the dictionary holding the orders\n # assumes max of one order per trader per list\n # checks that the Trader ID does actually exist in the dict before deletion\n # print('book_del %s',self.orders)\n if self.orders.get(order.tid) is not None:\n del (self.orders[order.tid])\n self.n_orders = len(self.orders)\n self.build_lob()\n # print('book_del %s', self.orders)\n\n def delete_best(self):\n # delete order: when the best bid/ask has been hit, delete it from the book\n # the TraderID of the deleted order is return-value, as counterparty to the trade\n best_price_orders = self.lob[self.best_price]\n best_price_qty = best_price_orders[0]\n best_price_counterparty = best_price_orders[1][0][2]\n if best_price_qty == 1:\n # here the order deletes the best price\n del (self.lob[self.best_price])\n del (self.orders[best_price_counterparty])\n self.n_orders = self.n_orders - 1\n if self.n_orders > 0:\n if self.booktype == 'Bid':\n self.best_price = max(self.lob.keys())\n else:\n self.best_price = min(self.lob.keys())\n self.lob_depth = len(self.lob.keys())\n else:\n self.best_price = self.worstprice\n self.lob_depth = 0\n else:\n # best_bid_qty>1 so the order decrements the quantity of the best bid\n # update the lob with the decremented order data\n self.lob[self.best_price] = [best_price_qty - 1, best_price_orders[1][1:]]\n\n # update the bid list: counterparty's bid has been deleted\n del (self.orders[best_price_counterparty])\n self.n_orders = self.n_orders - 1\n self.build_lob()\n return best_price_counterparty\n\n\n# Orderbook for a single instrument: list of bids and list of asks\n\nclass Orderbook(Orderbook_half):\n\n def __init__(self):\n self.bids = Orderbook_half('Bid', bse_sys_minprice)\n self.asks = Orderbook_half('Ask', bse_sys_maxprice)\n self.tape = []\n self.tape_length = 10000 # max number of events on tape (so we can do millions of orders without crashing)\n self.quote_id = 0 # unique 
ID code for each quote accepted onto the book\n self.lob_string = '' # character-string linearization of public lob items with nonzero quantities\n\n\n# Exchange's internal orderbook\n\nclass Exchange(Orderbook):\n\n def add_order(self, order, verbose):\n # add a quote/order to the exchange and update all internal records; return unique i.d.\n order.qid = self.quote_id\n self.quote_id = order.qid + 1\n # if verbose : print('QUID: order.quid=%d self.quote.id=%d' % (order.qid, self.quote_id))\n if order.otype == 'Bid':\n response = self.bids.book_add(order)\n best_price = self.bids.lob_anon[-1][0]\n self.bids.best_price = best_price\n self.bids.best_tid = self.bids.lob[best_price][1][0][2]\n else:\n response = self.asks.book_add(order)\n best_price = self.asks.lob_anon[0][0]\n self.asks.best_price = best_price\n self.asks.best_tid = self.asks.lob[best_price][1][0][2]\n return [order.qid, response]\n\n def del_order(self, time, order, verbose):\n # delete a trader's quot/order from the exchange, update all internal records\n if order.otype == 'Bid':\n self.bids.book_del(order)\n if self.bids.n_orders > 0:\n best_price = self.bids.lob_anon[-1][0]\n self.bids.best_price = best_price\n self.bids.best_tid = self.bids.lob[best_price][1][0][2]\n else: # this side of book is empty\n self.bids.best_price = None\n self.bids.best_tid = None\n cancel_record = {'type': 'Cancel', 'time': time, 'order': order}\n self.tape.append(cancel_record)\n # NB this just throws away the older items on the tape -- could instead dump to disk\n # right-truncate the tape so it keeps only the most recent items\n self.tape = self.tape[-self.tape_length:]\n\n elif order.otype == 'Ask':\n self.asks.book_del(order)\n if self.asks.n_orders > 0:\n best_price = self.asks.lob_anon[0][0]\n self.asks.best_price = best_price\n self.asks.best_tid = self.asks.lob[best_price][1][0][2]\n else: # this side of book is empty\n self.asks.best_price = None\n self.asks.best_tid = None\n cancel_record = {'type': 
'Cancel', 'time': time, 'order': order}\n self.tape.append(cancel_record)\n # NB this just throws away the older items on the tape -- could instead dump to disk\n # right-truncate the tape so it keeps only the most recent items\n self.tape = self.tape[-self.tape_length:]\n else:\n # neither bid nor ask?\n sys.exit('bad order type in del_quote()')\n\n def process_order2(self, time, order, verbose):\n # receive an order and either add it to the relevant LOB (ie treat as limit order)\n # or if it crosses the best counterparty offer, execute it (treat as a market order)\n oprice = order.price\n counterparty = None\n [qid, response] = self.add_order(order, verbose) # add it to the order lists -- overwriting any previous order\n order.qid = qid\n if verbose:\n print('QUID: order.quid=%d' % order.qid)\n print('RESPONSE: %s' % response)\n best_ask = self.asks.best_price\n best_ask_tid = self.asks.best_tid\n best_bid = self.bids.best_price\n best_bid_tid = self.bids.best_tid\n if order.otype == 'Bid':\n if self.asks.n_orders > 0 and best_bid >= best_ask:\n # bid lifts the best ask\n if verbose:\n print(\"Bid $%s lifts best ask\" % oprice)\n counterparty = best_ask_tid\n price = best_ask # bid crossed ask, so use ask price\n if verbose:\n print('counterparty, price', counterparty, price)\n # delete the ask just crossed\n self.asks.delete_best()\n # delete the bid that was the latest order\n self.bids.delete_best()\n elif order.otype == 'Ask':\n if self.bids.n_orders > 0 and best_ask <= best_bid:\n # ask hits the best bid\n if verbose:\n print(\"Ask $%s hits best bid\" % oprice)\n # remove the best bid\n counterparty = best_bid_tid\n price = best_bid # ask crossed bid, so use bid price\n if verbose:\n print('counterparty, price', counterparty, price)\n # delete the bid just crossed, from the exchange's records\n self.bids.delete_best()\n # delete the ask that was the latest order, from the exchange's records\n self.asks.delete_best()\n else:\n # we should never get here\n 
sys.exit('process_order() given neither Bid nor Ask')\n # NB at this point we have deleted the order from the exchange's records\n # but the two traders concerned still have to be notified\n if verbose:\n print('counterparty %s' % counterparty)\n if counterparty is not None:\n # process the trade\n if verbose: print('>>>>>>>>>>>>>>>>>TRADE t=%010.3f $%d %s %s' % (time, price, counterparty, order.tid))\n transaction_record = {'type': 'Trade',\n 'time': time,\n 'price': price,\n 'party1': counterparty,\n 'party2': order.tid,\n 'qty': order.qty\n }\n self.tape.append(transaction_record)\n # NB this just throws away the older items on the tape -- could instead dump to disk\n # right-truncate the tape so it keeps only the most recent items\n self.tape = self.tape[-self.tape_length:]\n\n\n return transaction_record\n else:\n return None\n\n # Currently tape_dump only writes a list of transactions (ignores cancellations)\n def tape_dump(self, fname, fmode, tmode):\n dumpfile = open(fname, fmode)\n # dumpfile.write('type, time, price\\n')\n for tapeitem in self.tape:\n if tapeitem['type'] == 'Trade':\n dumpfile.write('Trd, %010.3f, %s\\n' % (tapeitem['time'], tapeitem['price']))\n dumpfile.close()\n if tmode == 'wipe':\n self.tape = []\n\n # this returns the LOB data \"published\" by the exchange,\n # i.e., what is accessible to the traders\n def publish_lob(self, time, lob_file, verbose):\n public_data = {}\n public_data['time'] = time\n public_data['bids'] = {'best': self.bids.best_price,\n 'worst': self.bids.worstprice,\n 'n': self.bids.n_orders,\n 'lob': self.bids.lob_anon}\n public_data['asks'] = {'best': self.asks.best_price,\n 'worst': self.asks.worstprice,\n 'sess_hi': self.asks.session_extreme,\n 'n': self.asks.n_orders,\n 'lob': self.asks.lob_anon}\n public_data['QID'] = self.quote_id\n public_data['tape'] = self.tape\n\n if lob_file is not None:\n # build a linear character-string summary of only those prices on LOB with nonzero quantities\n lobstring ='Bid:,'\n 
n_bids = len(self.bids.lob_anon)\n if n_bids > 0:\n lobstring += '%d,' % n_bids\n for lobitem in self.bids.lob_anon:\n price_str = '%d,' % lobitem[0]\n qty_str = '%d,' % lobitem[1]\n lobstring = lobstring + price_str + qty_str\n else:\n lobstring += '0,'\n lobstring += 'Ask:,'\n n_asks = len(self.asks.lob_anon)\n if n_asks > 0:\n lobstring += '%d,' % n_asks\n for lobitem in self.asks.lob_anon:\n price_str = '%d,' % lobitem[0]\n qty_str = '%d,' % lobitem[1]\n lobstring = lobstring + price_str + qty_str\n else:\n lobstring += '0,'\n # is this different to the last lob_string?\n if lobstring != self.lob_string:\n # write it\n lob_file.write('%.3f, %s\\n' % (time, lobstring))\n # remember it\n self.lob_string = lobstring\n\n if verbose:\n print('publish_lob: t=%d' % time)\n print('BID_lob=%s' % public_data['bids']['lob'])\n # print('best=%s; worst=%s; n=%s ' % (self.bids.best_price, self.bids.worstprice, self.bids.n_orders))\n print('ASK_lob=%s' % public_data['asks']['lob'])\n # print('qid=%d' % self.quote_id)\n\n return public_data\n\n\n##################--Traders below here--#############\n\n\n# Trader superclass\n# all Traders have a trader id, bank balance, blotter, and list of orders to execute\nclass Trader:\n\n def __init__(self, ttype, tid, balance, params, time):\n self.ttype = ttype # what type / strategy this trader is\n self.tid = tid # trader unique ID code\n self.balance = balance # money in the bank\n self.params = params # parameters/extras associated with this trader-type or individual trader.\n self.blotter = [] # record of trades executed\n self.blotter_length = 100 # maximum length of blotter\n self.orders = [] # customer orders currently being worked (fixed at 1)\n self.n_quotes = 0 # number of quotes live on LOB\n self.birthtime = time # used when calculating age of a trader/strategy\n self.profitpertime = 0 # profit per unit time\n self.n_trades = 0 # how many trades has this trader done?\n self.lastquote = None # record of what its last quote 
was\n\n\n def __str__(self):\n return '[TID %s type %s balance %s blotter %s orders %s n_trades %s profitpertime %s]' \\\n % (self.tid, self.ttype, self.balance, self.blotter, self.orders, self.n_trades, self.profitpertime)\n\n\n def add_order(self, order, verbose):\n # in this version, trader has at most one order,\n # if allow more than one, this needs to be self.orders.append(order)\n if self.n_quotes > 0:\n # this trader has a live quote on the LOB, from a previous customer order\n # need response to signal cancellation/withdrawal of that quote\n response = 'LOB_Cancel'\n else:\n response = 'Proceed'\n self.orders = [order]\n if verbose:\n print('add_order < response=%s' % response)\n return response\n\n\n def del_order(self, order):\n # this is lazy: assumes each trader has only one customer order with quantity=1, so deleting sole order\n self.orders = []\n\n\n def bookkeep(self, trade, order, verbose, time):\n\n outstr = \"\"\n for order in self.orders:\n outstr = outstr + str(order)\n\n self.blotter.append(trade) # add trade record to trader's blotter\n self.blotter = self.blotter[-self.blotter_length:] # right-truncate to keep to length\n\n # NB What follows is **LAZY** -- assumes all orders are quantity=1\n transactionprice = trade['price']\n if self.orders[0].otype == 'Bid':\n profit = self.orders[0].price - transactionprice\n else:\n profit = transactionprice - self.orders[0].price\n self.balance += profit\n self.n_trades += 1\n self.profitpertime = self.balance / (time - self.birthtime)\n\n if profit < 0:\n print(profit)\n print(trade)\n print(order)\n sys.exit('FAIL: negative profit')\n\n if verbose: print('%s profit=%d balance=%d profit/time=%d' % (outstr, profit, self.balance, self.profitpertime))\n self.del_order(order) # delete the order\n\n\n # specify how trader responds to events in the market\n # this is a null action, expect it to be overloaded by specific algos\n def respond(self, time, lob, trade, verbose):\n return None\n\n\n # specify how 
trader mutates its parameter values\n # this is a null action, expect it to be overloaded by specific algos\n def mutate(self, time, lob, trade, verbose):\n return None\n\n\n# Trader subclass Giveaway\n# even dumber than a ZI-U: just give the deal away\n# (but never makes a loss)\nclass Trader_Giveaway(Trader):\n\n def getorder(self, time, countdown, lob):\n if len(self.orders) < 1:\n order = None\n else:\n quoteprice = self.orders[0].price\n order = Order(self.tid,\n self.orders[0].otype,\n quoteprice,\n self.orders[0].qty,\n time, lob['QID'])\n self.lastquote = order\n return order\n\n\n# Trader subclass ZI-C\n# After Gode & Sunder 1993\nclass Trader_ZIC(Trader):\n\n def getorder(self, time, countdown, lob):\n if len(self.orders) < 1:\n # no orders: return NULL\n order = None\n else:\n minprice = lob['bids']['worst']\n maxprice = lob['asks']['worst']\n qid = lob['QID']\n limit = self.orders[0].price\n otype = self.orders[0].otype\n if otype == 'Bid':\n quoteprice = random.randint(minprice, limit)\n else:\n quoteprice = random.randint(limit, maxprice)\n # NB should check it == 'Ask' and barf if not\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, qid)\n self.lastquote = order\n return order\n\n\n# Trader subclass Shaver\n# shaves a penny off the best price\n# if there is no best price, creates \"stub quote\" at system max/min\nclass Trader_Shaver(Trader):\n\n def getorder(self, time, countdown, lob):\n if len(self.orders) < 1:\n order = None\n else:\n limitprice = self.orders[0].price\n otype = self.orders[0].otype\n if otype == 'Bid':\n if lob['bids']['n'] > 0:\n quoteprice = lob['bids']['best'] + 1\n if quoteprice > limitprice:\n quoteprice = limitprice\n else:\n quoteprice = lob['bids']['worst']\n else:\n if lob['asks']['n'] > 0:\n quoteprice = lob['asks']['best'] - 1\n if quoteprice < limitprice:\n quoteprice = limitprice\n else:\n quoteprice = lob['asks']['worst']\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, 
lob['QID'])\n self.lastquote = order\n return order\n\n\n# Trader subclass Sniper\n# Based on Shaver,\n# \"lurks\" until time remaining < threshold% of the trading session\n# then gets increasing aggressive, increasing \"shave thickness\" as time runs out\nclass Trader_Sniper(Trader):\n\n def getorder(self, time, countdown, lob):\n lurk_threshold = 0.2\n shavegrowthrate = 3\n shave = int(1.0 / (0.01 + countdown / (shavegrowthrate * lurk_threshold)))\n if (len(self.orders) < 1) or (countdown > lurk_threshold):\n order = None\n else:\n limitprice = self.orders[0].price\n otype = self.orders[0].otype\n\n if otype == 'Bid':\n if lob['bids']['n'] > 0:\n quoteprice = lob['bids']['best'] + shave\n if quoteprice > limitprice:\n quoteprice = limitprice\n else:\n quoteprice = lob['bids']['worst']\n else:\n if lob['asks']['n'] > 0:\n quoteprice = lob['asks']['best'] - shave\n if quoteprice < limitprice:\n quoteprice = limitprice\n else:\n quoteprice = lob['asks']['worst']\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, lob['QID'])\n self.lastquote = order\n return order\n\n\n# Trader subclass PRZI (ticker: PRSH)\n# added 6 Sep 2022 -- replaces old PRZI and PRZI_SHC, unifying them into one function and also adding PRDE\n#\n# Dave Cliff's Parameterized-Response Zero-Intelligence (PRZI) trader -- pronounced \"prezzie\"\n# but with added adaptive strategies, currently either...\n# ++ a k-point Stochastic Hill-Climber (SHC) hence PRZI-SHC,\n# PRZI-SHC pronounced \"prezzy-shuck\". 
Ticker symbol PRSH pronounced \"purrsh\";\n# or\n# ++ a simple differential evolution (DE) optimizer with pop_size=k, hence PRZE-DE or PRDE ('purdy\")\n#\n# when optimizer == None then it implements plain-vanilla non-adaptive PRZI, with a fixed strategy-value.\n\nclass Trader_PRZI(Trader):\n\n # how to mutate the strategy values when evolving / hill-climbing\n def mutate_strat(self, s, mode):\n s_min = self.strat_range_min\n s_max = self.strat_range_max\n if mode == 'gauss':\n sdev = 0.05\n newstrat = s\n while newstrat == s:\n newstrat = s + random.gauss(0.0, sdev)\n # truncate to keep within range\n newstrat = max(-1.0, min(1.0, newstrat))\n elif mode == 'uniform_whole_range':\n # draw uniformly from whole range\n newstrat = random.uniform(-1.0, +1.0)\n elif mode == 'uniform_bounded_range':\n # draw uniformly from bounded range\n newstrat = random.uniform(s_min, s_max)\n else:\n sys.exit('FAIL: bad mode in mutate_strat')\n return newstrat\n\n\n def strat_str(self):\n # pretty-print a string summarising this trader's strategies\n string = '%s: %s active_strat=[%d]:\\n' % (self.tid, self.ttype, self.active_strat)\n for s in range(0, self.k):\n strat = self.strats[s]\n stratstr = '[%d]: s=%+f, start=%f, $=%f, pps=%f\\n' % \\\n (s, strat['stratval'], strat['start_t'], strat['profit'], strat['pps'])\n string = string + stratstr\n\n return string\n\n\n def __init__(self, ttype, tid, balance, params, time):\n # if params == \"landscape-mapper\" then it generates data for mapping the fitness landscape\n\n verbose = True\n\n Trader.__init__(self, ttype, tid, balance, params, time)\n\n # unpack the params\n # for all three of PRZI, PRSH, and PRDE params can include strat_min and strat_max\n # for PRSH and PRDE params should include values for optimizer and k\n # if no params specified then defaults to PRZI with strat values in [-1.0,+1.0]\n\n # default parameter values\n k = 1\n optimizer = None # no optimizer => plain non-adaptive PRZI\n s_min = -1.0\n s_max = +1.0\n \n # 
did call provide different params?\n if type(params) is dict:\n if 'k' in params:\n k = params['k']\n if 'optimizer' in params:\n optimizer = params['optimizer']\n s_min = params['strat_min']\n s_max = params['strat_max']\n \n self.optmzr = optimizer # this determines whether it's PRZI, PRSH, or PRDE\n self.k = k # number of sampling points (cf number of arms on a multi-armed-bandit, or pop-size)\n self.theta0 = 100 # threshold-function limit value\n self.m = 4 # tangent-function multiplier\n self.strat_wait_time = 7200 # how many secs do we give any one strat before switching?\n self.strat_range_min = s_min # lower-bound on randomly-assigned strategy-value\n self.strat_range_max = s_max # upper-bound on randomly-assigned strategy-value\n self.active_strat = 0 # which of the k strategies are we currently playing? -- start with 0\n self.prev_qid = None # previous order i.d.\n self.strat_eval_time = self.k * self.strat_wait_time # time to cycle through evaluating all k strategies\n self.last_strat_change_time = time # what time did we last change strategies?\n self.profit_epsilon = 0.0 * random.random() # minimum profit-per-sec difference between strategies that counts\n self.strats = [] # strategies awaiting initialization\n self.pmax = None # this trader's estimate of the maximum price the market will bear\n self.pmax_c_i = math.sqrt(random.randint(1,10)) # multiplier coefficient when estimating p_max\n self.mapper_outfile = None\n # differential evolution parameters all in one dictionary\n self.diffevol = {'de_state': 'active_s0', # initial state: strategy 0 is active (being evaluated)\n 's0_index': self.active_strat, # s0 starts out as active strat\n 'snew_index': self.k, # (k+1)th item of strategy list is DE's new strategy\n 'snew_stratval': None, # assigned later\n 'F': 0.8 # differential weight -- usually between 0 and 2\n }\n\n start_time = time\n profit = 0.0\n profit_per_second = 0\n lut_bid = None\n lut_ask = None\n\n for s in range(self.k + 1):\n # 
initialise each of the strategies in sequence: \n # for PRZI: only one strategy is needed\n # for PRSH, one random initial strategy, then k-1 mutants of that initial strategy\n # for PRDE, use draws from uniform distbn over whole range and a (k+1)th strategy is needed to hold s_new\n if s == 0:\n strategy = random.uniform(self.strat_range_min, self.strat_range_max)\n else:\n if self.optmzr == 'PRSH':\n # simple stochastic hill climber: cluster other strats around strat_0\n strategy = self.mutate_strat(self.strats[0]['stratval'], 'gauss') # mutant of strats[0]\n elif self.optmzr == 'PRDE':\n # differential evolution: seed initial strategies across whole space\n strategy = self.mutate_strat(self.strats[0]['stratval'], 'uniform_bounded_range')\n else:\n # PRZI -- do nothing\n pass\n self.strats.append({'stratval': strategy, 'start_t': start_time,\n 'profit': profit, 'pps': profit_per_second, 'lut_bid': lut_bid, 'lut_ask': lut_ask})\n if self.optmzr is None:\n # PRZI -- so we stop after one iteration\n break\n elif self.optmzr == 'PRSH' and s == self.k - 1:\n # PRSH -- doesn't need the (k+1)th strategy\n break\n\n if self.params == 'landscape-mapper':\n # replace seed+mutants set of strats with regularly-spaced strategy values over the whole range\n self.strats = []\n strategy_delta = 0.01\n strategy = -1.0\n k = 0\n self.strats = []\n while strategy <= +1.0:\n self.strats.append({'stratval': strategy, 'start_t': start_time,\n 'profit': profit, 'pps': profit_per_second, 'lut_bid': lut_bid, 'lut_ask': lut_ask})\n k += 1\n strategy += strategy_delta\n self.mapper_outfile = open('landscape_map.csv', 'w')\n self.k = k\n self.strat_eval_time = self.k * self.strat_wait_time\n\n if verbose:\n print(\"%s\\n\" % self.strat_str())\n\n\n def getorder(self, time, countdown, lob):\n\n # shvr_price tells us what price a SHVR would quote in these circs\n def shvr_price(otype, limit, lob):\n\n if otype == 'Bid':\n if lob['bids']['n'] > 0:\n shvr_p = lob['bids']['best'] + ticksize # 
BSE ticksize is global var\n if shvr_p > limit:\n shvr_p = limit\n else:\n shvr_p = lob['bids']['worst']\n else:\n if lob['asks']['n'] > 0:\n shvr_p = lob['asks']['best'] - ticksize # BSE ticksize is global var\n if shvr_p < limit:\n shvr_p = limit\n else:\n shvr_p = lob['asks']['worst']\n\n # print('shvr_p=%f; ' % shvr_p)\n return shvr_p\n\n\n # calculate cumulative distribution function (CDF) look-up table (LUT)\n def calc_cdf_lut(strat, t0, m, dirn, pmin, pmax):\n # set parameter values and calculate CDF LUT\n # strat is strategy-value in [-1,+1]\n # t0 and m are constants used in the threshold function\n # dirn is direction: 'buy' or 'sell'\n # pmin and pmax are bounds on discrete-valued price-range\n\n # the threshold function used to clip\n def threshold(theta0, x):\n t = max(-1*theta0, min(theta0, x))\n return t\n\n epsilon = 0.000001 #used to catch DIV0 errors\n verbose = False\n\n if (strat > 1.0) or (strat < -1.0):\n # out of range\n sys.exit('PRSH FAIL: strat=%f out of range\\n' % strat)\n\n if (dirn != 'buy') and (dirn != 'sell'):\n # out of range\n sys.exit('PRSH FAIL: bad dirn=%s\\n' % dirn)\n\n if pmax < pmin:\n # screwed\n sys.exit('PRSH FAIL: pmax %f < pmin %f \\n' % (pmax, pmin))\n\n if verbose:\n print('PRSH calc_cdf_lut: strat=%f dirn=%d pmin=%d pmax=%d\\n' % (strat, dirn, pmin, pmax))\n\n p_range = float(pmax - pmin)\n if p_range < 1:\n # special case: the SHVR-style strategy has shaved all the way to the limit price\n # the lower and upper bounds on the interval are adjacent prices;\n # so cdf is simply the limit-price with probability 1\n\n if dirn == 'buy':\n cdf = [{'price':pmax, 'cum_prob': 1.0}]\n else: # must be a sell\n cdf = [{'price': pmin, 'cum_prob': 1.0}]\n\n if verbose:\n print('\\n\\ncdf:', cdf)\n\n return {'strat': strat, 'dirn': dirn, 'pmin': pmin, 'pmax': pmax, 'cdf_lut': cdf}\n\n c = threshold(t0, m * math.tan(math.pi * (strat + 0.5)))\n\n # catch div0 errors here\n if abs(c) < epsilon:\n if c > 0:\n c = epsilon\n else:\n c = 
-epsilon\n\n e2cm1 = math.exp(c) - 1\n\n # calculate the discrete calligraphic-P function over interval [pmin, pmax]\n # (i.e., this is Equation 8 in the PRZI Technical Note)\n calp_interval = []\n calp_sum = 0\n for p in range(pmin, pmax + 1):\n # normalize the price to proportion of its range\n p_r = (p - pmin) / (p_range) # p_r in [0.0, 1.0]\n if strat == 0.0:\n # special case: this is just ZIC\n cal_p = 1 / (p_range + 1)\n elif strat > 0:\n if dirn == 'buy':\n cal_p = (math.exp(c * p_r) - 1.0) / e2cm1\n else: # dirn == 'sell'\n cal_p = (math.exp(c * (1 - p_r)) - 1.0) / e2cm1\n else: # self.strat < 0\n if dirn == 'buy':\n cal_p = 1.0 - ((math.exp(c * p_r) - 1.0) / e2cm1)\n else: # dirn == 'sell'\n cal_p = 1.0 - ((math.exp(c * (1 - p_r)) - 1.0) / e2cm1)\n\n if cal_p < 0:\n cal_p = 0 # just in case\n\n calp_interval.append({'price':p, \"cal_p\":cal_p})\n calp_sum += cal_p\n\n if calp_sum <= 0:\n print('calp_interval:', calp_interval)\n print('pmin=%f, pmax=%f, calp_sum=%f' % (pmin, pmax, calp_sum))\n\n cdf = []\n cum_prob = 0\n # now go thru interval summing and normalizing to give the CDF\n for p in range(pmin, pmax + 1):\n cal_p = calp_interval[p-pmin]['cal_p']\n prob = cal_p / calp_sum\n cum_prob += prob\n cdf.append({'price': p, 'cum_prob': cum_prob})\n\n if verbose:\n print('\\n\\ncdf:', cdf)\n\n return {'strat':strat, 'dirn':dirn, 'pmin':pmin, 'pmax':pmax, 'cdf_lut':cdf}\n\n verbose = False\n\n if verbose:\n print('t=%.1f PRSH getorder: %s, %s' % (time, self.tid, self.strat_str()))\n\n if len(self.orders) < 1:\n # no orders: return NULL\n order = None\n else:\n # unpack the assignment-order\n limit = self.orders[0].price\n otype = self.orders[0].otype\n qid = self.orders[0].qid\n\n if self.prev_qid is None:\n self.prev_qid = qid\n\n if qid != self.prev_qid:\n # customer-order i.d. has changed, so we're working a new customer-order now\n # this is the time to switch arms\n # print(\"New order! 
(how does it feel?)\")\n dummy = 1\n\n # get extreme limits on price interval\n # lowest price the market will bear\n minprice = int(lob['bids']['worst']) # default assumption: worst bid price possible as defined by exchange\n\n # trader's individual estimate highest price the market will bear\n maxprice = self.pmax # default assumption\n if self.pmax is None:\n maxprice = int(limit * self.pmax_c_i + 0.5) # in the absence of any other info, guess\n self.pmax = maxprice\n elif lob['asks']['sess_hi'] is not None:\n if self.pmax < lob['asks']['sess_hi']: # some other trader has quoted higher than I expected\n maxprice = lob['asks']['sess_hi'] # so use that as my new estimate of highest\n self.pmax = maxprice\n\n # use the cdf look-up table\n # cdf_lut is a list of little dictionaries\n # each dictionary has form: {'cum_prob':nnn, 'price':nnn}\n # generate u=U(0,1) uniform disrtibution\n # starting with the lowest nonzero cdf value at cdf_lut[0],\n # walk up the lut (i.e., examine higher cumulative probabilities),\n # until we're in the range of u; then return the relevant price\n\n strat = self.strats[self.active_strat]['stratval']\n\n # what price would a SHVR quote?\n p_shvr = shvr_price(otype, limit, lob)\n\n if otype == 'Bid':\n\n p_max = int(limit)\n if strat > 0.0:\n p_min = minprice\n else:\n # shade the lower bound on the interval\n # away from minprice and toward shvr_price\n p_min = int(0.5 + (-strat * p_shvr) + ((1.0 + strat) * minprice))\n\n lut_bid = self.strats[self.active_strat]['lut_bid']\n if (lut_bid is None) or \\\n (lut_bid['strat'] != strat) or\\\n (lut_bid['pmin'] != p_min) or \\\n (lut_bid['pmax'] != p_max):\n # need to compute a new LUT\n if verbose:\n print('New bid LUT')\n self.strats[self.active_strat]['lut_bid'] = calc_cdf_lut(strat, self.theta0, self.m, 'buy', p_min, p_max)\n\n lut = self.strats[self.active_strat]['lut_bid']\n\n else: # otype == 'Ask'\n\n p_min = int(limit)\n if strat > 0.0:\n p_max = maxprice\n else:\n # shade the upper 
bound on the interval\n # away from maxprice and toward shvr_price\n p_max = int(0.5 + (-strat * p_shvr) + ((1.0 + strat) * maxprice))\n if p_max < p_min:\n # this should never happen, but just in case it does...\n p_max = p_min\n\n\n lut_ask = self.strats[self.active_strat]['lut_ask']\n if (lut_ask is None) or \\\n (lut_ask['strat'] != strat) or \\\n (lut_ask['pmin'] != p_min) or \\\n (lut_ask['pmax'] != p_max):\n # need to compute a new LUT\n if verbose:\n print('New ask LUT')\n self.strats[self.active_strat]['lut_ask'] = calc_cdf_lut(strat, self.theta0, self.m, 'sell', p_min, p_max)\n\n lut = self.strats[self.active_strat]['lut_ask']\n\n \n verbose = False\n if verbose:\n print('PRZI strat=%f LUT=%s \\n \\n' % (strat, lut))\n # useful in debugging: print a table of lut: price and cum_prob, with the discrete derivative (gives PMF).\n last_cprob = 0.0\n for lut_entry in lut['cdf_lut']:\n cprob = lut_entry['cum_prob']\n print('%d, %f, %f' % (lut_entry['price'], cprob - last_cprob, cprob))\n last_cprob = cprob\n print('\\n'); \n \n # print ('[LUT print suppressed]')\n \n # do inverse lookup on the LUT to find the price\n u = random.random()\n for entry in lut['cdf_lut']:\n if u < entry['cum_prob']:\n quoteprice = entry['price']\n break\n\n order = Order(self.tid, otype, quoteprice, self.orders[0].qty, time, lob['QID'])\n\n self.lastquote = order\n\n return order\n\n\n def bookkeep(self, trade, order, verbose, time):\n\n outstr = \"\"\n for order in self.orders:\n outstr = outstr + str(order)\n\n self.blotter.append(trade) # add trade record to trader's blotter\n self.blotter = self.blotter[-self.blotter_length:] # right-truncate to keep to length\n\n # NB What follows is **LAZY** -- assumes all orders are quantity=1\n transactionprice = trade['price']\n if self.orders[0].otype == 'Bid':\n profit = self.orders[0].price - transactionprice\n else:\n profit = transactionprice - self.orders[0].price\n self.balance += profit\n self.n_trades += 1\n self.profitpertime = 
self.balance / (time - self.birthtime)\n\n if profit < 0:\n print(profit)\n print(trade)\n print(order)\n sys.exit('PRSH FAIL: negative profit')\n\n if verbose: print('%s profit=%d balance=%d profit/time=%d' % (outstr, profit, self.balance, self.profitpertime))\n self.del_order(order) # delete the order\n\n self.strats[self.active_strat]['profit'] += profit\n time_alive = time - self.strats[self.active_strat]['start_t']\n if time_alive > 0:\n profit_per_second = self.strats[self.active_strat]['profit'] / time_alive\n self.strats[self.active_strat]['pps'] = profit_per_second\n else:\n # if it trades at the instant it is born then it would have infinite profit-per-second, which is insane\n # to keep things sensible whne time_alive == 0 we say the profit per second is whatever the actual profit is\n self.strats[self.active_strat]['pps'] = profit\n\n\n # PRSH respond() asks/answers two questions\n # do we need to choose a new strategy? (i.e. have just completed/cancelled previous customer order)\n # do we need to dump one arm and generate a new one? (i.e., both/all arms have been evaluated enough)\n def respond(self, time, lob, trade, verbose):\n\n # \"PRSH\" is a very basic form of stochastic hill-climber (SHC) that's v easy to understand and to code\n # it cycles through the k different strats until each has been operated for at least eval_time seconds\n # but a strat that does nothing will get swapped out if it's been running for no_deal_time without a deal\n # then the strats with the higher total accumulated profit is retained,\n # and mutated versions of it are copied into the other k-1 strats\n # then all counters are reset, and this is repeated indefinitely\n #\n # \"PRDE\" uses a basic form of Differential Evolution. 
This maintains a population of at least four strats\n # iterates indefinitely on:\n # shuffle the set of strats;\n # name the first four strats s0 to s3;\n # create new_strat=s1+f*(s2-s3);\n # evaluate fitness of s0 and new_strat;\n # if (new_strat fitter than s0) then new_strat replaces s0.\n #\n # todo: add in other optimizer algorithms that are cleverer than these\n # e.g. inspired by multi-arm-bandit algos like like epsilon-greedy, softmax, or upper confidence bound (UCB)\n\n verbose = False\n\n # first update each strategy's profit-per-second (pps) value -- this is the \"fitness\" of each strategy\n for s in self.strats:\n # debugging check: make profit be directly proportional to strategy, no noise\n # s['profit'] = 100 * abs(s['stratval'])\n # update pps\n pps_time = time - s['start_t']\n if pps_time > 0:\n s['pps'] = s['profit'] / pps_time\n else:\n s['pps'] = s['profit']\n\n\n if self.optmzr == 'PRSH':\n\n if verbose:\n # print('t=%f %s PRSH respond: shc_algo=%s eval_t=%f max_wait_t=%f' %\n # (time, self.tid, shc_algo, self.strat_eval_time, self.strat_wait_time))\n dummy = 1\n\n # do we need to swap strategies?\n # this is based on time elapsed since last reset -- waiting for the current strategy to get a deal\n # -- otherwise a hopeless strategy can just sit there for ages doing nothing,\n # which would disadvantage the *other* strategies because they would never get a chance to score any profit.\n\n # NB this *cycles* through the available strats in sequence\n\n s = self.active_strat\n time_elapsed = time - self.last_strat_change_time\n if time_elapsed > self.strat_wait_time:\n # we have waited long enough: swap to another strategy\n\n new_strat = s + 1\n if new_strat > self.k - 1:\n new_strat = 0\n\n self.active_strat = new_strat\n self.last_strat_change_time = time\n\n if verbose:\n print('t=%f %s PRSH respond: strat[%d] elapsed=%f; wait_t=%f, switched to strat=%d' %\n (time, self.tid, s, time_elapsed, self.strat_wait_time, new_strat))\n\n # code below 
here deals with creating a new set of k-1 mutants from the best of the k strats\n\n # assume that all strats have had long enough, and search for evidence to the contrary\n all_old_enough = True\n for s in self.strats:\n lifetime = time - s['start_t']\n if lifetime < self.strat_eval_time:\n all_old_enough = False\n break\n\n if all_old_enough:\n # all strategies have had long enough: which has made most profit?\n\n # sort them by profit\n strats_sorted = sorted(self.strats, key = lambda k: k['pps'], reverse = True)\n # strats_sorted = self.strats # use this as a control: unsorts the strats, gives pure random walk.\n\n if verbose:\n print('PRSH %s: strat_eval_time=%f, all_old_enough=True' % (self.tid, self.strat_eval_time))\n for s in strats_sorted:\n print('s=%f, start_t=%f, lifetime=%f, $=%f, pps=%f' %\n (s['stratval'], s['start_t'], time-s['start_t'], s['profit'], s['pps']))\n\n if self.params == 'landscape-mapper':\n for s in self.strats:\n self.mapper_outfile.write('time, %f, strat, %f, pps, %f\\n' %\n (time, s['stratval'], s['pps']))\n self.mapper_outfile.flush()\n sys.exit()\n\n else:\n # if the difference between the top two strats is too close to call then flip a coin\n # this is to prevent the same good strat being held constant simply by chance cos it is at index [0]\n best_strat = 0\n prof_diff = strats_sorted[0]['pps'] - strats_sorted[1]['pps']\n if abs(prof_diff) < self.profit_epsilon:\n # they're too close to call, so just flip a coin\n best_strat = random.randint(0,1)\n\n if best_strat == 1:\n # need to swap strats[0] and strats[1]\n tmp_strat = strats_sorted[0]\n strats_sorted[0] = strats_sorted[1]\n strats_sorted[1] = tmp_strat\n\n # the sorted list of strats replaces the existing list\n self.strats = strats_sorted\n\n # at this stage, strats_sorted[0] is our newly-chosen elite-strat, about to replicate\n\n # now replicate and mutate the elite into all the other strats\n for s in range(1, self.k): # note range index starts at one not zero (elite is 
at [0])\n self.strats[s]['stratval'] = self.mutate_strat(self.strats[0]['stratval'], 'gauss')\n self.strats[s]['start_t'] = time\n self.strats[s]['profit'] = 0.0\n self.strats[s]['pps'] = 0.0\n # and then update (wipe) records for the elite\n self.strats[0]['start_t'] = time\n self.strats[0]['profit'] = 0.0\n self.strats[0]['pps'] = 0.0\n self.active_strat = 0\n\n if verbose:\n print('%s: strat_eval_time=%f, MUTATED:' % (self.tid, self.strat_eval_time))\n for s in self.strats:\n print('s=%f start_t=%f, lifetime=%f, $=%f, pps=%f' %\n (s['stratval'], s['start_t'], time-s['start_t'], s['profit'], s['pps']))\n\n elif self.optmzr == 'PRDE':\n # simple differential evolution\n\n # only initiate diff-evol once the active strat has been evaluated for long enough\n actv_lifetime = time - self.strats[self.active_strat]['start_t']\n if actv_lifetime >= self.strat_wait_time:\n\n if self.k < 4:\n sys.exit('FAIL: k too small for diffevol')\n\n if self.diffevol['de_state'] == 'active_s0':\n # we've evaluated s0, so now we need to evaluate s_new\n self.active_strat = self.diffevol['snew_index']\n self.strats[self.active_strat]['start_t'] = time\n self.strats[self.active_strat]['profit'] = 0.0\n self.strats[self.active_strat]['pps'] = 0.0\n\n self.diffevol['de_state'] = 'active_snew'\n\n elif self.diffevol['de_state'] == 'active_snew':\n # now we've evaluated s_0 and s_new, so we can do DE adaptive step\n if verbose:\n print('PRDE trader %s' % self.tid)\n i_0 = self.diffevol['s0_index']\n i_new = self.diffevol['snew_index']\n fit_0 = self.strats[i_0]['pps']\n fit_new = self.strats[i_new]['pps']\n\n if verbose:\n print('DiffEvol: t=%.1f, i_0=%d, i0fit=%f, i_new=%d, i_new_fit=%f' % (time, i_0, fit_0, i_new, fit_new))\n\n if fit_new >= fit_0:\n # new strat did better than old strat0, so overwrite new into strat0\n self.strats[i_0]['stratval'] = self.strats[i_new]['stratval']\n\n # do differential evolution\n\n # pick four individual strategies at random, but they must be distinct\n 
stratlist = list(range(0, self.k)) # create sequential list of strategy-numbers\n random.shuffle(stratlist) # shuffle the list\n\n # s0 is next iteration's candidate for possible replacement\n self.diffevol['s0_index'] = stratlist[0]\n\n # s1, s2, s3 used in DE to create new strategy, potential replacement for s0\n s1_index = stratlist[1]\n s2_index = stratlist[2]\n s3_index = stratlist[3]\n\n # unpack the actual strategy values\n s1_stratval = self.strats[s1_index]['stratval']\n s2_stratval = self.strats[s2_index]['stratval']\n s3_stratval = self.strats[s3_index]['stratval']\n\n # this is the differential evolution \"adaptive step\": create a new individual\n new_stratval = s1_stratval + self.diffevol['F'] * (s2_stratval - s3_stratval)\n\n # clip to bounds\n new_stratval = max(-1, min(+1, new_stratval))\n\n # record it for future use (s0 will be evaluated first, then s_new)\n self.strats[self.diffevol['snew_index']]['stratval'] = new_stratval\n\n if verbose:\n print('DiffEvol: t=%.1f, s0=%d, s1=%d, (s=%+f), s2=%d, (s=%+f), s3=%d, (s=%+f), sNew=%+f' %\n (time, self.diffevol['s0_index'],\n s1_index, s1_stratval, s2_index, s2_stratval, s3_index, s3_stratval, new_stratval))\n\n # DC's intervention for fully converged populations\n # is the stddev of the strategies in the population equal/close to zero?\n sum = 0.0\n for s in range(self.k):\n sum += self.strats[s]['stratval']\n strat_mean = sum / self.k\n sumsq = 0.0\n for s in range(self.k):\n diff = self.strats[s]['stratval'] - strat_mean\n sumsq += (diff * diff)\n strat_stdev = math.sqrt(sumsq / self.k)\n if verbose:\n print('t=,%.1f, MeanStrat=, %+f, stdev=,%f' % (time, strat_mean, strat_stdev))\n if strat_stdev < 0.0001:\n # this population has converged\n # mutate one strategy at random\n randindex = random.randint(0, self.k - 1)\n self.strats[randindex]['stratval'] = random.uniform(-1.0, +1.0)\n if verbose:\n print('Converged pop: set strategy %d to %+f' % (randindex, self.strats[randindex]['stratval']))\n\n # 
set up next iteration: first evaluate s0\n self.active_strat = self.diffevol['s0_index']\n self.strats[self.active_strat]['start_t'] = time\n self.strats[self.active_strat]['profit'] = 0.0\n self.strats[self.active_strat]['pps'] = 0.0\n\n self.diffevol['de_state'] = 'active_s0'\n\n else:\n sys.exit('FAIL: self.diffevol[\\'de_state\\'] not recognized')\n\n elif self.optmzr is None:\n # this is PRZI -- nonadaptive, no optimizer, nothing to change here.\n pass\n\n else:\n sys.exit('FAIL: bad value for self.optmzr')\n\n\nclass Trader_ZIP(Trader):\n\n # ZIP init key param-values are those used in Cliff's 1997 original HP Labs tech report\n # NB this implementation keeps separate margin values for buying & selling,\n # so a single trader can both buy AND sell\n # -- in the original, traders were either buyers OR sellers\n\n def __init__(self, ttype, tid, balance, params, time):\n Trader.__init__(self, ttype, tid, balance, params, time)\n self.willing = 1\n self.able = 1\n self.job = None # this gets switched to 'Bid' or 'Ask' depending on order-type\n self.active = False # gets switched to True while actively working an order\n self.prev_change = 0 # this was called last_d in Cliff'97\n self.beta = 0.1 + 0.4 * random.random()\n self.momntm = 0.1 * random.random()\n self.ca = 0.05 # self.ca & .cr were hard-coded in '97 but parameterised later\n self.cr = 0.05\n self.margin = None # this was called profit in Cliff'97\n self.margin_buy = -1.0 * (0.05 + 0.3 * random.random())\n self.margin_sell = 0.05 + 0.3 * random.random()\n self.price = None\n self.limit = None\n # memory of best price & quantity of best bid and ask, on LOB on previous update\n self.prev_best_bid_p = None\n self.prev_best_bid_q = None\n self.prev_best_ask_p = None\n self.prev_best_ask_q = None\n\n def getorder(self, time, countdown, lob):\n if len(self.orders) < 1:\n self.active = False\n order = None\n else:\n self.active = True\n self.limit = self.orders[0].price\n self.job = self.orders[0].otype\n if 
self.job == 'Bid':\n # currently a buyer (working a bid order)\n self.margin = self.margin_buy\n else:\n # currently a seller (working a sell order)\n self.margin = self.margin_sell\n quoteprice = int(self.limit * (1 + self.margin))\n self.price = quoteprice\n\n order = Order(self.tid, self.job, quoteprice, self.orders[0].qty, time, lob['QID'])\n self.lastquote = order\n return order\n\n # update margin on basis of what happened in market\n def respond(self, time, lob, trade, verbose):\n # ZIP trader responds to market events, altering its margin\n # does this whether it currently has an order to work or not\n\n def target_up(price):\n # generate a higher target price by randomly perturbing given price\n ptrb_abs = self.ca * random.random() # absolute shift\n ptrb_rel = price * (1.0 + (self.cr * random.random())) # relative shift\n target = int(round(ptrb_rel + ptrb_abs, 0))\n # # print('TargetUp: %d %d\\n' % (price,target))\n return target\n\n def target_down(price):\n # generate a lower target price by randomly perturbing given price\n ptrb_abs = self.ca * random.random() # absolute shift\n ptrb_rel = price * (1.0 - (self.cr * random.random())) # relative shift\n target = int(round(ptrb_rel - ptrb_abs, 0))\n # # print('TargetDn: %d %d\\n' % (price,target))\n return target\n\n def willing_to_trade(price):\n # am I willing to trade at this price?\n willing = False\n if self.job == 'Bid' and self.active and self.price >= price:\n willing = True\n if self.job == 'Ask' and self.active and self.price <= price:\n willing = True\n return willing\n\n def profit_alter(price):\n oldprice = self.price\n diff = price - oldprice\n change = ((1.0 - self.momntm) * (self.beta * diff)) + (self.momntm * self.prev_change)\n self.prev_change = change\n newmargin = ((self.price + change) / self.limit) - 1.0\n\n if self.job == 'Bid':\n if newmargin < 0.0:\n self.margin_buy = newmargin\n self.margin = newmargin\n else:\n if newmargin > 0.0:\n self.margin_sell = newmargin\n self.margin = 
newmargin\n\n # set the price from limit and profit-margin\n self.price = int(round(self.limit * (1.0 + self.margin), 0))\n\n # # print('old=%d diff=%d change=%d price = %d\\n' % (oldprice, diff, change, self.price))\n\n # what, if anything, has happened on the bid LOB?\n bid_improved = False\n bid_hit = False\n lob_best_bid_p = lob['bids']['best']\n lob_best_bid_q = None\n if lob_best_bid_p is not None:\n # non-empty bid LOB\n lob_best_bid_q = lob['bids']['lob'][-1][1]\n if (self.prev_best_bid_p is not None) and (self.prev_best_bid_p < lob_best_bid_p):\n # best bid has improved\n # NB doesn't check if the improvement was by self\n bid_improved = True\n elif trade is not None and ((self.prev_best_bid_p > lob_best_bid_p) or (\n (self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\n # previous best bid was hit\n bid_hit = True\n elif self.prev_best_bid_p is not None:\n # the bid LOB has been emptied: was it cancelled or hit?\n last_tape_item = lob['tape'][-1]\n if last_tape_item['type'] == 'Cancel':\n bid_hit = False\n else:\n bid_hit = True\n\n # what, if anything, has happened on the ask LOB?\n ask_improved = False\n ask_lifted = False\n lob_best_ask_p = lob['asks']['best']\n lob_best_ask_q = None\n if lob_best_ask_p is not None:\n # non-empty ask LOB\n lob_best_ask_q = lob['asks']['lob'][0][1]\n if (self.prev_best_ask_p is not None) and (self.prev_best_ask_p > lob_best_ask_p):\n # best ask has improved -- NB doesn't check if the improvement was by self\n ask_improved = True\n elif trade is not None and ((self.prev_best_ask_p < lob_best_ask_p) or (\n (self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\n # trade happened and best ask price has got worse, or stayed same but quantity reduced\n # -- assume previous best ask was lifted\n ask_lifted = True\n elif self.prev_best_ask_p is not None:\n # the ask LOB is empty now but was not previously: canceled or lifted?\n last_tape_item = 
lob['tape'][-1]\n if last_tape_item['type'] == 'Cancel':\n ask_lifted = False\n else:\n ask_lifted = True\n\n if verbose and (bid_improved or bid_hit or ask_improved or ask_lifted):\n print('B_improved', bid_improved, 'B_hit', bid_hit, 'A_improved', ask_improved, 'A_lifted', ask_lifted)\n\n deal = bid_hit or ask_lifted\n\n if self.job == 'Ask':\n # seller\n if deal:\n tradeprice = trade['price']\n if self.price <= tradeprice:\n # could sell for more? raise margin\n target_price = target_up(tradeprice)\n profit_alter(target_price)\n elif ask_lifted and self.active and not willing_to_trade(tradeprice):\n # wouldn't have got this deal, still working order, so reduce margin\n target_price = target_down(tradeprice)\n profit_alter(target_price)\n else:\n # no deal: aim for a target price higher than best bid\n if ask_improved and self.price > lob_best_ask_p:\n if lob_best_bid_p is not None:\n target_price = target_up(lob_best_bid_p)\n else:\n target_price = lob['asks']['worst'] # stub quote\n profit_alter(target_price)\n\n if self.job == 'Bid':\n # buyer\n if deal:\n tradeprice = trade['price']\n if self.price >= tradeprice:\n # could buy for less? raise margin (i.e. 
cut the price)\n target_price = target_down(tradeprice)\n profit_alter(target_price)\n elif bid_hit and self.active and not willing_to_trade(tradeprice):\n # wouldn't have got this deal, still working order, so reduce margin\n target_price = target_up(tradeprice)\n profit_alter(target_price)\n else:\n # no deal: aim for target price lower than best ask\n if bid_improved and self.price < lob_best_bid_p:\n if lob_best_ask_p is not None:\n target_price = target_down(lob_best_ask_p)\n else:\n target_price = lob['bids']['worst'] # stub quote\n profit_alter(target_price)\n\n # remember the best LOB data ready for next response\n self.prev_best_bid_p = lob_best_bid_p\n self.prev_best_bid_q = lob_best_bid_q\n self.prev_best_ask_p = lob_best_ask_p\n self.prev_best_ask_q = lob_best_ask_q\n\n\n# ########################---trader-types have all been defined now--################\n\n\n# #########################---Below lies the experiment/test-rig---##################\n\n\n# trade_stats()\n# dump CSV statistics on exchange data and trader population to file for later analysis\n# this makes no assumptions about the number of types of traders, or\n# the number of traders of any one type -- allows either/both to change\n# between successive calls, but that does make it inefficient as it has to\n# re-analyse the entire set of traders on each call\ndef trade_stats(expid, traders, dumpfile, time, lob):\n\n # Analyse the set of traders, to see what types we have\n trader_types = {}\n for t in traders:\n ttype = traders[t].ttype\n if ttype in trader_types.keys():\n t_balance = trader_types[ttype]['balance_sum'] + traders[t].balance\n n = trader_types[ttype]['n'] + 1\n else:\n t_balance = traders[t].balance\n n = 1\n trader_types[ttype] = {'n': n, 'balance_sum': t_balance}\n\n # first two columns of output are the session_id and the time\n dumpfile.write('%s, %06d, ' % (expid, time))\n\n # second two columns of output are the LOB best bid and best offer (or 'None' if they're 
undefined)\n if lob['bids']['best'] is not None:\n dumpfile.write('%d, ' % (lob['bids']['best']))\n else:\n dumpfile.write('None, ')\n if lob['asks']['best'] is not None:\n dumpfile.write('%d, ' % (lob['asks']['best']))\n else:\n dumpfile.write('None, ')\n\n # total remaining number of columns printed depends on number of different trader-types at this timestep\n # for each trader type we print FOUR columns...\n # TraderTypeCode, TotalProfitForThisTraderType, NumberOfTradersOfThisType, AverageProfitPerTraderOfThisType\n for ttype in sorted(list(trader_types.keys())):\n n = trader_types[ttype]['n']\n s = trader_types[ttype]['balance_sum']\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\n\n dumpfile.write('\\n')\n\n\n# create a bunch of traders from traders_spec\n# returns tuple (n_buyers, n_sellers)\n# optionally shuffles the pack of buyers and the pack of sellers\ndef populate_market(traders_spec, traders, shuffle, verbose):\n # traders_spec is a list of buyer-specs and a list of seller-specs\n # each spec is (<trader type>, <number of this type of trader>, optionally: <params for this type of trader>)\n\n def trader_type(robottype, name, parameters):\n balance = 0.00\n time0 = 0\n if robottype == 'GVWY':\n return Trader_Giveaway('GVWY', name, balance, parameters, time0)\n elif robottype == 'ZIC':\n return Trader_ZIC('ZIC', name, balance, parameters, time0)\n elif robottype == 'SHVR':\n return Trader_Shaver('SHVR', name, balance, parameters, time0)\n elif robottype == 'SNPR':\n return Trader_Sniper('SNPR', name, balance, parameters, time0)\n elif robottype == 'ZIP':\n return Trader_ZIP('ZIP', name, balance, parameters, time0)\n elif robottype == 'PRZI':\n return Trader_PRZI('PRZI', name, balance, parameters, time0)\n elif robottype == 'PRSH':\n return Trader_PRZI('PRSH', name, balance, parameters, time0)\n elif robottype == 'PRDE':\n return Trader_PRZI('PRDE', name, balance, parameters, time0)\n else:\n sys.exit('FATAL: don\\'t know robot type 
%s\\n' % robottype)\n\n def shuffle_traders(ttype_char, n, traders):\n for swap in range(n):\n t1 = (n - 1) - swap\n t2 = random.randint(0, t1)\n t1name = '%c%02d' % (ttype_char, t1)\n t2name = '%c%02d' % (ttype_char, t2)\n traders[t1name].tid = t2name\n traders[t2name].tid = t1name\n temp = traders[t1name]\n traders[t1name] = traders[t2name]\n traders[t2name] = temp\n\n def unpack_params(trader_params, mapping):\n # unpack the parameters for PRZI-family of strategies\n parameters = None\n if ttype == 'PRSH' or ttype == 'PRDE' or ttype == 'PRZI':\n # parameters matter...\n if mapping:\n parameters = 'landscape-mapper'\n else:\n # params determines type of optimizer used\n if ttype == 'PRSH':\n parameters = {'optimizer': 'PRSH', 'k': trader_params['k'],\n 'strat_min': trader_params['s_min'], 'strat_max': trader_params['s_max']}\n elif ttype == 'PRDE':\n parameters = {'optimizer': 'PRDE', 'k': trader_params['k'],\n 'strat_min': trader_params['s_min'], 'strat_max': trader_params['s_max']}\n else: # ttype=PRZI\n parameters = {'optimizer': None, 'k': 1,\n 'strat_min': trader_params['s_min'], 'strat_max': trader_params['s_max']}\n\n return parameters\n\n landscape_mapping = False # set to true when mapping fitness landscape (for PRSH etc).\n\n # the code that follows is a bit of a kludge, needs tidying up.\n n_buyers = 0\n for bs in traders_spec['buyers']:\n ttype = bs[0]\n for b in range(bs[1]):\n tname = 'B%02d' % n_buyers # buyer i.d. string\n if len(bs) > 2:\n # third part of the buyer-spec is params for this trader-type\n params = unpack_params(bs[2], landscape_mapping)\n else:\n params = unpack_params(None, landscape_mapping)\n traders[tname] = trader_type(ttype, tname, params)\n n_buyers = n_buyers + 1\n\n if n_buyers < 1:\n sys.exit('FATAL: no buyers specified\\n')\n\n if shuffle:\n shuffle_traders('B', n_buyers, traders)\n\n n_sellers = 0\n for ss in traders_spec['sellers']:\n ttype = ss[0]\n for s in range(ss[1]):\n tname = 'S%02d' % n_sellers # buyer i.d. 
string\n if len(ss) > 2:\n # third part of the buyer-spec is params for this trader-type\n params = unpack_params(ss[2], landscape_mapping)\n else:\n params = unpack_params(None, landscape_mapping)\n traders[tname] = trader_type(ttype, tname, params)\n n_sellers = n_sellers + 1\n\n if n_sellers < 1:\n sys.exit('FATAL: no sellers specified\\n')\n\n if shuffle:\n shuffle_traders('S', n_sellers, traders)\n\n if verbose:\n for t in range(n_buyers):\n bname = 'B%02d' % t\n print(traders[bname])\n for t in range(n_sellers):\n bname = 'S%02d' % t\n print(traders[bname])\n\n return {'n_buyers': n_buyers, 'n_sellers': n_sellers}\n\n\n# customer_orders(): allocate orders to traders\n# parameter \"os\" is order schedule\n# os['timemode'] is either 'periodic', 'drip-fixed', 'drip-jitter', or 'drip-poisson'\n# os['interval'] is number of seconds for a full cycle of replenishment\n# drip-poisson sequences will be normalised to ensure time of last replenishment <= interval\n# parameter \"pending\" is the list of future orders (if this is empty, generates a new one from os)\n# revised \"pending\" is the returned value\n#\n# also returns a list of \"cancellations\": trader-ids for those traders who are now working a new order and hence\n# need to kill quotes already on LOB from working previous order\n#\n#\n# if a supply or demand schedule mode is \"random\" and more than one range is supplied in ranges[],\n# then each time a price is generated one of the ranges is chosen equiprobably and\n# the price is then generated uniform-randomly from that range\n#\n# if len(range)==2, interpreted as min and max values on the schedule, specifying linear supply/demand curve\n# if len(range)==3, first two vals are min & max, third value should be a function that generates a dynamic price offset\n# -- the offset value applies equally to the min & max, so gradient of linear sup/dem curve doesn't vary\n# if len(range)==4, the third value is function that gives dynamic offset for schedule min,\n# 
and fourth is a function giving dynamic offset for schedule max, so gradient of sup/dem linear curve can vary\n#\n# the interface on this is a bit of a mess... could do with refactoring\n\n\ndef customer_orders(time, last_update, traders, trader_stats, os, pending, verbose):\n\n def sysmin_check(price):\n if price < bse_sys_minprice:\n print('WARNING: price < bse_sys_min -- clipped')\n price = bse_sys_minprice\n return price\n\n def sysmax_check(price):\n if price > bse_sys_maxprice:\n print('WARNING: price > bse_sys_max -- clipped')\n price = bse_sys_maxprice\n return price\n\n def getorderprice(i, sched, n, mode, issuetime):\n # does the first schedule range include optional dynamic offset function(s)?\n if len(sched[0]) > 2:\n offsetfn = sched[0][2]\n if callable(offsetfn):\n # same offset for min and max\n offset_min = offsetfn(issuetime)\n offset_max = offset_min\n else:\n sys.exit('FAIL: 3rd argument of sched in getorderprice() not callable')\n if len(sched[0]) > 3:\n # if second offset function is specfied, that applies only to the max value\n offsetfn = sched[0][3]\n if callable(offsetfn):\n # this function applies to max\n offset_max = offsetfn(issuetime)\n else:\n sys.exit('FAIL: 4th argument of sched in getorderprice() not callable')\n else:\n offset_min = 0.0\n offset_max = 0.0\n\n pmin = sysmin_check(offset_min + min(sched[0][0], sched[0][1]))\n pmax = sysmax_check(offset_max + max(sched[0][0], sched[0][1]))\n prange = pmax - pmin\n stepsize = prange / (n - 1)\n halfstep = round(stepsize / 2.0)\n\n if mode == 'fixed':\n orderprice = pmin + int(i * stepsize)\n elif mode == 'jittered':\n orderprice = pmin + int(i * stepsize) + random.randint(-halfstep, halfstep)\n elif mode == 'random':\n if len(sched) > 1:\n # more than one schedule: choose one equiprobably\n s = random.randint(0, len(sched) - 1)\n pmin = sysmin_check(min(sched[s][0], sched[s][1]))\n pmax = sysmax_check(max(sched[s][0], sched[s][1]))\n orderprice = random.randint(pmin, pmax)\n else:\n 
sys.exit('FAIL: Unknown mode in schedule')\n orderprice = sysmin_check(sysmax_check(orderprice))\n return orderprice\n\n def getissuetimes(n_traders, mode, interval, shuffle, fittointerval):\n interval = float(interval)\n if n_traders < 1:\n sys.exit('FAIL: n_traders < 1 in getissuetime()')\n elif n_traders == 1:\n tstep = interval\n else:\n tstep = interval / (n_traders - 1)\n arrtime = 0\n issuetimes = []\n for t in range(n_traders):\n if mode == 'periodic':\n arrtime = interval\n elif mode == 'drip-fixed':\n arrtime = t * tstep\n elif mode == 'drip-jitter':\n arrtime = t * tstep + tstep * random.random()\n elif mode == 'drip-poisson':\n # poisson requires a bit of extra work\n interarrivaltime = random.expovariate(n_traders / interval)\n arrtime += interarrivaltime\n else:\n sys.exit('FAIL: unknown time-mode in getissuetimes()')\n issuetimes.append(arrtime)\n\n # at this point, arrtime is the last arrival time\n if fittointerval and ((arrtime > interval) or (arrtime < interval)):\n # generated sum of interarrival times longer than the interval\n # squish them back so that last arrival falls at t=interval\n for t in range(n_traders):\n issuetimes[t] = interval * (issuetimes[t] / arrtime)\n # optionally randomly shuffle the times\n if shuffle:\n for t in range(n_traders):\n i = (n_traders - 1) - t\n j = random.randint(0, i)\n tmp = issuetimes[i]\n issuetimes[i] = issuetimes[j]\n issuetimes[j] = tmp\n return issuetimes\n\n def getschedmode(time, os):\n got_one = False\n for sched in os:\n if (sched['from'] <= time) and (time < sched['to']):\n # within the timezone for this schedule\n schedrange = sched['ranges']\n mode = sched['stepmode']\n got_one = True\n break # jump out the loop -- so the first matching timezone has priority over any others\n if not got_one:\n sys.exit('Fail: time=%5.2f not within any timezone in os=%s' % (time, os))\n return (schedrange, mode)\n\n n_buyers = trader_stats['n_buyers']\n n_sellers = trader_stats['n_sellers']\n\n shuffle_times = 
True\n\n cancellations = []\n\n if len(pending) < 1:\n # list of pending (to-be-issued) customer orders is empty, so generate a new one\n new_pending = []\n\n # demand side (buyers)\n issuetimes = getissuetimes(n_buyers, os['timemode'], os['interval'], shuffle_times, True)\n\n ordertype = 'Bid'\n (sched, mode) = getschedmode(time, os['dem'])\n for t in range(n_buyers):\n issuetime = time + issuetimes[t]\n tname = 'B%02d' % t\n orderprice = getorderprice(t, sched, n_buyers, mode, issuetime)\n order = Order(tname, ordertype, orderprice, 1, issuetime, chrono.time())\n new_pending.append(order)\n\n # supply side (sellers)\n issuetimes = getissuetimes(n_sellers, os['timemode'], os['interval'], shuffle_times, True)\n ordertype = 'Ask'\n (sched, mode) = getschedmode(time, os['sup'])\n for t in range(n_sellers):\n issuetime = time + issuetimes[t]\n tname = 'S%02d' % t\n orderprice = getorderprice(t, sched, n_sellers, mode, issuetime)\n # print('time %d sellerprice %d' % (time,orderprice))\n order = Order(tname, ordertype, orderprice, 1, issuetime, chrono.time())\n new_pending.append(order)\n else:\n # there are pending future orders: issue any whose timestamp is in the past\n new_pending = []\n for order in pending:\n if order.time < time:\n # this order should have been issued by now\n # issue it to the trader\n tname = order.tid\n response = traders[tname].add_order(order, verbose)\n if verbose:\n print('Customer order: %s %s' % (response, order))\n if response == 'LOB_Cancel':\n cancellations.append(tname)\n if verbose:\n print('Cancellations: %s' % cancellations)\n # and then don't add it to new_pending (i.e., delete it)\n else:\n # this order stays on the pending list\n new_pending.append(order)\n return [new_pending, cancellations]\n\n\n# one session in the market\ndef market_session(sess_id, starttime, endtime, trader_spec, order_schedule, avg_bals, dump_all, verbose):\n\n\n def dump_strats_frame(time, stratfile, trdrs):\n # write one frame of strategy snapshot\n\n 
line_str = 't=,%.0f, ' % time\n\n best_buyer_id = None\n best_buyer_prof = 0\n best_buyer_strat = 0\n best_seller_id = None\n best_seller_prof = 0\n best_seller_strat = 0\n\n for t in traders:\n trader = trdrs[t]\n\n # print('PRSH/PRDE recording, t=%s' % trader)\n if trader.ttype == 'PRSH' or trader.ttype == 'PRDE':\n line_str += 'id=,%s, %s,' % (trader.tid, trader.ttype)\n\n # line_str += 'bal=$,%f, n_trades=,%d, n_strats=,2, ' % (trader.balance, trader.n_trades)\n\n act_strat = trader.strats[trader.active_strat]['stratval']\n act_prof = trader.strats[trader.active_strat]['pps']\n\n line_str += 'actvstrat=,%f, ' % act_strat\n line_str += 'actvprof=,%f, ' % act_prof\n\n if trader.tid[:1] == 'B':\n # this trader is a buyer\n if best_buyer_id is None or act_prof > best_buyer_prof:\n best_buyer_id = trader.tid\n best_buyer_strat = act_strat\n best_buyer_prof = act_prof\n elif trader.tid[:1] == 'S':\n # this trader is a seller\n if best_seller_id is None or act_prof > best_seller_prof:\n best_seller_id = trader.tid\n best_seller_strat = act_strat\n best_seller_prof = act_prof\n else:\n # wtf?\n sys.exit('unknown trader id type in market_session')\n\n line_str += 'best_B_id=,%s, best_B_prof=,%f, best_B_strat=,%f, ' % \\\n (best_buyer_id, best_buyer_prof, best_buyer_strat)\n line_str += 'best_S_id=,%s, best_S_prof=,%f, best_S_strat=,%f, ' % \\\n (best_seller_id, best_seller_prof, best_seller_strat)\n line_str += '\\n'\n stratfile.write(line_str)\n stratfile.flush()\n\n\n def blotter_dump(session_id, traders):\n bdump = open(session_id+'_blotters.csv', 'w')\n for t in traders:\n bdump.write('%s, %d\\n'% (traders[t].tid, len(traders[t].blotter)))\n for b in traders[t].blotter:\n bdump.write('%s, %s, %.3f, %d, %s, %s, %d\\n'\n % (traders[t].tid, b['type'], b['time'], b['price'], b['party1'], b['party2'], b['qty']))\n bdump.close()\n\n\n orders_verbose = False\n lob_verbose = False\n process_verbose = False\n respond_verbose = False\n bookkeep_verbose = False\n 
populate_verbose = False\n\n strat_dump = open(sess_id + '_strats.csv', 'w')\n\n lobframes = open(sess_id + '_LOB_frames.csv', 'w')\n lobframes = None # this disables writing of the LOB frames (which can generate HUGE files)\n\n # initialise the exchange\n exchange = Exchange()\n\n # create a bunch of traders\n traders = {}\n trader_stats = populate_market(trader_spec, traders, True, populate_verbose)\n\n # timestep set so that can process all traders in one second\n # NB minimum interarrival time of customer orders may be much less than this!!\n timestep = 1.0 / float(trader_stats['n_buyers'] + trader_stats['n_sellers'])\n\n duration = float(endtime - starttime)\n\n last_update = -1.0\n\n time = starttime\n\n pending_cust_orders = []\n\n if verbose:\n print('\\n%s; ' % sess_id)\n\n # frames_done is record of what frames we have printed data for thus far\n frames_done = set()\n\n while time < endtime:\n\n # how much time left, as a percentage?\n time_left = (endtime - time) / duration\n\n # if verbose: print('\\n\\n%s; t=%08.2f (%4.1f/100) ' % (sess_id, time, time_left*100))\n\n trade = None\n\n [pending_cust_orders, kills] = customer_orders(time, last_update, traders, trader_stats,\n order_schedule, pending_cust_orders, orders_verbose)\n\n # if any newly-issued customer orders mean quotes on the LOB need to be cancelled, kill them\n if len(kills) > 0:\n # if verbose : print('Kills: %s' % (kills))\n for kill in kills:\n # if verbose : print('lastquote=%s' % traders[kill].lastquote)\n if traders[kill].lastquote is not None:\n # if verbose : print('Killing order %s' % (str(traders[kill].lastquote)))\n exchange.del_order(time, traders[kill].lastquote, verbose)\n\n # get a limit-order quote (or None) from a randomly chosen trader\n tid = list(traders.keys())[random.randint(0, len(traders) - 1)]\n order = traders[tid].getorder(time, time_left, exchange.publish_lob(time, lobframes, lob_verbose))\n\n # if verbose: print('Trader Quote: %s' % (order))\n\n if order is not 
None:\n if order.otype == 'Ask' and order.price < traders[tid].orders[0].price:\n sys.exit('Bad ask')\n if order.otype == 'Bid' and order.price > traders[tid].orders[0].price:\n sys.exit('Bad bid')\n # send order to exchange\n traders[tid].n_quotes = 1\n trade = exchange.process_order2(time, order, process_verbose)\n if trade is not None:\n # trade occurred,\n # so the counterparties update order lists and blotters\n traders[trade['party1']].bookkeep(trade, order, bookkeep_verbose, time)\n traders[trade['party2']].bookkeep(trade, order, bookkeep_verbose, time)\n if dump_all:\n trade_stats(sess_id, traders, avg_bals, time, exchange.publish_lob(time, lobframes, lob_verbose))\n\n # traders respond to whatever happened\n lob = exchange.publish_lob(time, lobframes, lob_verbose)\n for t in traders:\n # NB respond just updates trader's internal variables\n # doesn't alter the LOB, so processing each trader in\n # sequence (rather than random/shuffle) isn't a problem\n traders[t].respond(time, lob, trade, respond_verbose)\n\n # log all the PRSH/PRD/etc strategy info for this timestep?\n frame_rate = 60 * 60 # print one frame every this many simulated seconds\n\n if int(time) % frame_rate == 0 and int(time) not in frames_done:\n # print one more frame to strategy dumpfile\n dump_strats_frame(time, strat_dump, traders)\n # record that we've written this frame\n frames_done.add(int(time))\n\n time = time + timestep\n\n # session has ended\n\n strat_dump.close()\n\n if lobframes is not None:\n lobframes.close()\n\n dump_all = True\n\n if dump_all:\n\n # dump the tape (transactions only -- not writing cancellations)\n exchange.tape_dump(sess_id+'_tape.csv', 'w', 'keep')\n\n # record the blotter for each trader\n blotter_dump(sess_id, traders)\n\n\n # write trade_stats for this session (NB end-of-session summary only)\n trade_stats(sess_id, traders, avg_bals, time, exchange.publish_lob(time, lobframes, lob_verbose))\n\n\n\n#############################\n\n# # Below here is where 
we set up and run a whole series of experiments\n\n\nif __name__ == \"__main__\":\n\n # set up common parameters for all market sessions\n n_days = 1.0 # 1000 days is good, but 3*365=1095, so may as well go for three years.\n start_time = 0.0\n end_time = 60.0 * 60.0 * 24 * n_days\n duration = end_time - start_time\n\n\n # schedule_offsetfn returns time-dependent offset, to be added to schedule prices\n def schedule_offsetfn(t):\n\n pi2 = math.pi * 2\n c = math.pi * 3000\n wavelength = t / c\n gradient = 100 * t / (c / pi2)\n amplitude = 100 * t / (c / pi2)\n offset = gradient + amplitude * math.sin(wavelength * t)\n return int(round(offset, 0))\n\n # Here is an example of how to use the offset function\n #\n # range1 = (10, 190, schedule_offsetfn)\n # range2 = (200, 300, schedule_offsetfn)\n\n # Here is an example of how to switch from range1 to range2 and then back to range1,\n # introducing two \"market shocks\"\n # -- here the timings of the shocks are at 1/3 and 2/3 into the duration of the session.\n #\n # supply_schedule = [ {'from':start_time, 'to':duration/3, 'ranges':[range1], 'stepmode':'fixed'},\n # {'from':duration/3, 'to':2*duration/3, 'ranges':[range2], 'stepmode':'fixed'},\n # {'from':2*duration/3, 'to':end_time, 'ranges':[range1], 'stepmode':'fixed'}\n # ]\n\n\n # The code below sets up symmetric supply and demand curves at prices from 50 to 150, P0=100\n\n range1 = (60, 60)\n supply_schedule = [{'from': start_time, 'to': end_time, 'ranges': [range1], 'stepmode': 'fixed'}]\n\n range2 = (100, 100)\n demand_schedule = [{'from': start_time, 'to': end_time, 'ranges': [range2], 'stepmode': 'fixed'}]\n\n # The code below sets up flat (perfectly elastic) supply and demand curves at prices of 50 and 150, P0=100\n\n #range1 = (60, 60)\n #supply_schedule = [{'from': start_time, 'to': end_time, 'ranges': [range1], 'stepmode': 'fixed'}]\n\n #range2 = (140, 140)\n #demand_schedule = [{'from': start_time, 'to': end_time, 'ranges': [range2], 'stepmode': 
'fixed'}]\n\n # new customer orders arrive at each trader approx once every order_interval seconds\n order_interval = 5\n\n order_sched = {'sup': supply_schedule, 'dem': demand_schedule,\n 'interval': order_interval, 'timemode': 'drip-poisson'}\n\n # Use 'periodic' if you want the traders' assignments to all arrive simultaneously & periodically\n # 'order_interval': 30, 'timemode': 'periodic'}\n\n # buyers_spec = [('GVWY',10),('SHVR',10),('ZIC',10),('ZIP',10)]\n # sellers_spec = [('GVWY',10),('SHVR',10),('ZIC',10),('ZIP',10)]\n\n opponent = 'GVWY'\n opp_N = 30\n# sellers_spec = [('PRSH', 30),(opponent, opp_N-1)]\n# buyers_spec = [(opponent, opp_N)]\n\n sellers_spec = [('PRSH', 5, {'k': 4, 's_min': -1.0, 's_max': +1.0}),\n ('PRDE', 5, {'k': 4, 's_min': -1.0, 's_max': +1.0})]\n buyers_spec = sellers_spec\n\n traders_spec = {'sellers': sellers_spec, 'buyers': buyers_spec}\n\n # run a sequence of trials, one session per trial\n\n verbose = True\n\n # n_trials is how many trials (i.e. market sessions) to run in total\n n_trials = 10\n\n # n_recorded is how many trials (i.e. market sessions) to write full data-files for\n n_trials_recorded = 0\n\n trial = 1\n\n while trial < (n_trials+1):\n # create unique i.d. 
string for this trial\n trial_id = 'bse_d%03d_i%02d_%04d' % (n_days, order_interval, trial)\n\n if trial > n_trials_recorded:\n dump_all = False\n else:\n dump_all = True\n\n balances_file = open(trial_id + '_avg_balance.csv', 'w')\n market_session(trial_id, start_time, end_time, traders_spec, order_sched, balances_file, dump_all, verbose)\n balances_file.close()\n trial = trial + 1\n\n # run a sequence of trials that exhaustively varies the ratio of four trader types\n # NB this has weakness of symmetric proportions on buyers/sellers -- combinatorics of varying that are quite nasty\n #\n # n_trader_types = 4\n # equal_ratio_n = 4\n # n_trials_per_ratio = 50\n #\n # n_traders = n_trader_types * equal_ratio_n\n #\n # fname = 'balances_%03d.csv' % equal_ratio_n\n #\n # tdump = open(fname, 'w')\n #\n # min_n = 1\n #\n # trialnumber = 1\n # trdr_1_n = min_n\n # while trdr_1_n <= n_traders:\n # trdr_2_n = min_n\n # while trdr_2_n <= n_traders - trdr_1_n:\n # trdr_3_n = min_n\n # while trdr_3_n <= n_traders - (trdr_1_n + trdr_2_n):\n # trdr_4_n = n_traders - (trdr_1_n + trdr_2_n + trdr_3_n)\n # if trdr_4_n >= min_n:\n # buyers_spec = [('GVWY', trdr_1_n), ('SHVR', trdr_2_n),\n # ('ZIC', trdr_3_n), ('ZIP', trdr_4_n)]\n # sellers_spec = buyers_spec\n # traders_spec = {'sellers': sellers_spec, 'buyers': buyers_spec}\n # # print buyers_spec\n # trial = 1\n # while trial <= n_trials_per_ratio:\n # trial_id = 'trial%07d' % trialnumber\n # market_session(trial_id, start_time, end_time, traders_spec,\n # order_sched, tdump, False, True)\n # tdump.flush()\n # trial = trial + 1\n # trialnumber = trialnumber + 1\n # trdr_3_n += 1\n # trdr_2_n += 1\n # trdr_1_n += 1\n # tdump.close()\n #\n # print(trialnumber)\n\n" }, { "alpha_fraction": 0.3851630389690399, "alphanum_fraction": 0.3967914879322052, "avg_line_length": 38.98638153076172, "blob_id": "e5c5f3f38447911f115a96cc29fd04139dabf62f", "content_id": "673022be5274affc4c996171696cfba10bfce867", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21069, "license_type": "permissive", "max_line_length": 224, "num_lines": 514, "path": "/ZhenZhang/source/IGDX_MLOFI.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# Trader subclass ZIP\r\n# After Cliff 1997\r\n\r\n\r\nfrom BSE2_msg_classes import Assignment, Order, Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\nclass Trader_IGDX_MLOFI(Trader):\r\n\r\n def __init__(self, ttype, tid, balance, time,m):\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n self.active = False\r\n self.limit = None\r\n self.job = None\r\n\r\n #memory of all bids and asks and accepted bids and asks\r\n self.outstanding_bids = []\r\n self.outstanding_asks = []\r\n self.accepted_asks = []\r\n self.accepted_bids = []\r\n\r\n self.price = -1\r\n\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n\r\n self.first_turn = True\r\n\r\n self.gamma = 0.1\r\n\r\n self.holdings = 10\r\n self.remaining_offer_ops = 10\r\n self.values = [[0 for n in range(self.remaining_offer_ops)] for m in range(self.holdings)]\r\n\r\n # variable for MLOFI\r\n self.last_lob = None;\r\n self.es_list = [];\r\n self.ds_list = [];\r\n\r\n # variable for ratio\r\n self.bids_volume_list = []\r\n self.asks_volume_list = []\r\n\r\n # variable\r\n self.m = m;\r\n\r\n def is_imbalance_significant(self, m, threshold):\r\n cb_list = [0 for i in range(m)]\r\n ab_list = []\r\n\r\n ca_list = [0 for i in range(m)]\r\n aa_list = []\r\n\r\n n = 1\r\n\r\n while len(self.bids_volume_list) >= n and len(self.asks_volume_list) >= n:\r\n for i in range(m):\r\n cb_list[i] 
+= self.bids_volume_list[-n]['level' + str(i + 1)]\r\n ca_list[i] += self.asks_volume_list[-n]['level' + str(i + 1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n for i in range(m):\r\n temp1 = None\r\n temp2 = None\r\n if n == 1:\r\n temp1 = cb_list[i] + 1\r\n temp2 = ca_list[i] + 1\r\n else:\r\n temp1 = cb_list[i] / (n - 1) + 1\r\n temp2 = ca_list[i] / (n - 1) + 1\r\n ab_list.append(temp1)\r\n aa_list.append(temp2)\r\n\r\n v_bid = 0;\r\n v_ask = 0;\r\n for i in range(m):\r\n v_bid += math.exp(-0.5 * i) * ab_list[i];\r\n v_ask += math.exp(-0.5 * i) * aa_list[i];\r\n ratio = (v_bid - v_ask) / (v_bid + v_ask);\r\n\r\n # print self.bids_volume_list\r\n # print self.asks_volume_list\r\n # print ratio\r\n\r\n if (ratio > threshold or ratio < -threshold):\r\n return True\r\n else:\r\n return False\r\n\r\n def calc_bids_volume(self, lob, m, verbose):\r\n new_b = {}\r\n\r\n for i in range(1, m + 1):\r\n new_b['level' + str(i)] = self.cal_bids_n(lob, i)\r\n\r\n self.bids_volume_list.append(new_b)\r\n\r\n def cal_bids_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n return r_n\r\n\r\n def calc_asks_volume(self, lob, m, verbose):\r\n new_a = {}\r\n\r\n for i in range(1, m + 1):\r\n new_a['level' + str(i)] = self.cal_asks_n(lob, i);\r\n\r\n self.asks_volume_list.append(new_a)\r\n\r\n def cal_asks_n(self, lob, n):\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return q_n\r\n\r\n def calc_level_n_e(self, current_lob, n):\r\n b_n = 0\r\n r_n = 0\r\n a_n = 0\r\n q_n = 0\r\n\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n\r\n if (len(current_lob['bids']['lob']) < n):\r\n b_n = 0\r\n r_n = 0\r\n else:\r\n b_n = current_lob['bids']['lob'][n - 1][0]\r\n r_n = current_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['bids']['lob']) < n):\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n else:\r\n b_n_1 = self.last_lob['bids']['lob'][n - 
1][0]\r\n r_n_1 = self.last_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(current_lob['asks']['lob']) < n):\r\n a_n = 0\r\n q_n = 0\r\n else:\r\n a_n = current_lob['asks']['lob'][n - 1][0]\r\n q_n = current_lob['asks']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['asks']['lob']) < n):\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n else:\r\n a_n_1 = self.last_lob['asks']['lob'][n - 1][0]\r\n q_n_1 = self.last_lob['asks']['lob'][n - 1][1]\r\n\r\n delta_w = 0;\r\n\r\n if (b_n > b_n_1):\r\n delta_w = r_n\r\n elif (b_n == b_n_1):\r\n delta_w = r_n - r_n_1\r\n else:\r\n delta_w = -r_n_1\r\n\r\n delta_v = 0\r\n if (a_n > a_n_1):\r\n delta_v = -q_n_1\r\n elif (a_n == a_n_1):\r\n delta_v = q_n - q_n_1\r\n else:\r\n delta_v = q_n\r\n\r\n return delta_w - delta_v\r\n\r\n def calc_es(self, lob, m, verbose):\r\n new_e = {}\r\n for i in range(1, m + 1):\r\n new_e['level' + str(i)] = self.calc_level_n_e(lob, i)\r\n\r\n self.es_list.append(new_e)\r\n\r\n def calc_ds(self, lob, m, verbose):\r\n new_d = {}\r\n\r\n for i in range(1, m + 1):\r\n new_d['level' + str(i)] = self.cal_depth_n(lob, i)\r\n\r\n self.ds_list.append(new_d)\r\n\r\n def cal_depth_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return (r_n + q_n) / 2\r\n\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n def imbalance_alter(quoteprice_aa, lob, countdown, m):\r\n\r\n mlofi_list = [0 for i in range(m)]\r\n cd_list = [0 for i in range(m)]\r\n ad_list = []\r\n n = 1\r\n\r\n while len(self.es_list) >= n:\r\n for i in range(m):\r\n mlofi_list[i] += self.es_list[-n]['level' + str(i + 1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n n = 1\r\n\r\n while len(self.ds_list) >= n:\r\n for i in range(m):\r\n cd_list[i] += self.ds_list[-n]['level' + str(i + 1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n for i in range(m):\r\n temp = None\r\n if n == 
1:\r\n temp = cd_list[i] + 1\r\n else:\r\n temp = cd_list[i] / (n - 1) + 1\r\n ad_list.append(temp)\r\n\r\n c = 5\r\n decay = 0.8\r\n offset = 0\r\n\r\n for i in range(m):\r\n offset += int(mlofi_list[i] * c * pow(decay, i) / ad_list[i])\r\n\r\n benchmark = quoteprice_aa;\r\n if (lob['midprice'] != None):\r\n benchmark = lob['midprice']\r\n # print 'midprice is %d' % benchmark\r\n\r\n quoteprice_iaa = quoteprice_aa + 0.8 * (benchmark + offset - quoteprice_aa)\r\n\r\n if self.job == 'Bid' and quoteprice_iaa > self.limit:\r\n quoteprice_iaa = self.limit\r\n if self.job == 'Ask' and quoteprice_iaa < self.limit:\r\n quoteprice_iaa = self.limit\r\n\r\n\r\n\r\n if countdown < 0.3:\r\n print \"insert\"\r\n if self.job == 'Bid' and (len(lob['asks']['lob']) >= 1) and lob['asks']['lob'][0][0] < self.limit:\r\n quoteprice_iaa = lob['asks']['lob'][0][0]\r\n if self.job == 'Ask' and (len(lob['bids']['lob']) >= 1) and lob['bids']['lob'][0][0] > self.limit:\r\n quoteprice_iaa = lob['bids']['lob'][0][0]\r\n\r\n if self.job == 'Bid' and quoteprice_iaa < bse_sys_minprice:\r\n quoteprice_iaa = bse_sys_minprice+1\r\n if self.job == 'Ask' and quoteprice_iaa > bse_sys_maxprice:\r\n quoteprice_iaa = bse_sys_maxprice-1\r\n\r\n\r\n return quoteprice_iaa\r\n\r\n\r\n\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n return order\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].atype\r\n\r\n #calculate price\r\n if self.job == 'Bid':\r\n self.price = self.calc_p_bid(self.holdings - 1, self.remaining_offer_ops - 1)\r\n if self.job == 'Ask':\r\n self.price = self.calc_p_ask(self.holdings - 1, self.remaining_offer_ops - 1)\r\n\r\n quoteprice = self.price\r\n\r\n # print \"before:\"\r\n # print self.price\r\n if (self.is_imbalance_significant(self.m, 0.6)):\r\n # print \"abvious\"\r\n quoteprice_igdx = imbalance_alter(quoteprice, lob, countdown, self.m)\r\n else:\r\n # print \"not abvious\"\r\n quoteprice_igdx = 
quoteprice\r\n\r\n\r\n\r\n\r\n self.price = quoteprice_igdx\r\n #\r\n # print \"after:\"\r\n # print self.price\r\n order = Order(self.tid, self.job, 'LIM',self.price, self.orders[0].qty, time, None, -1)\r\n self.lastquote = order\r\n\r\n if self.first_turn or self.price == -1:\r\n if self.job == 'Bid':\r\n order = Order(self.tid, self.job, 'LIM', bse_sys_minprice+1, self.orders[0].qty, time, None, -1)\r\n if self.job == 'Ask':\r\n order = Order(self.tid, self.job, 'LIM', bse_sys_maxprice-1, self.orders[0].qty, time, None, -1)\r\n # print order\r\n\r\n return order\r\n\r\n def calc_p_bid(self, m, n):\r\n best_return = 0\r\n best_bid = 0\r\n second_best_return = 0\r\n second_best_bid = 0\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n thing = self.belief_buy(i) * ((self.limit - i) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_bid = best_bid\r\n second_best_return = best_return\r\n best_return = thing\r\n best_bid = i\r\n\r\n #always best bid largest one\r\n if second_best_bid > best_bid:\r\n a = second_best_bid\r\n second_best_bid = best_bid\r\n best_bid = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_bid), int(best_bid))]:\r\n thing = self.belief_buy(i + second_best_bid) * ((self.limit - (i + second_best_bid)) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_buy(i + second_best_bid) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_bid = i + second_best_bid\r\n\r\n return best_bid\r\n\r\n def calc_p_ask(self, m, n):\r\n best_return = 0\r\n best_ask = self.limit\r\n second_best_return = 0\r\n second_best_ask = self.limit\r\n\r\n #first step size of 1 get best and 2nd best\r\n for i in [x*2 for x in range(int(self.limit/2))]:\r\n j = i + self.limit\r\n thing = self.belief_sell(j) * ((j - self.limit) + 
self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(j) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n second_best_ask = best_ask\r\n second_best_return = best_return\r\n best_return = thing\r\n best_ask = j\r\n #always best ask largest one\r\n if second_best_ask > best_ask:\r\n a = second_best_ask\r\n second_best_ask = best_ask\r\n best_ask = a\r\n\r\n #then step size 0.05\r\n for i in [x*0.05 for x in range(int(second_best_ask), int(best_ask))]:\r\n thing = self.belief_sell(i + second_best_ask) * (((i + second_best_ask) - self.limit) + self.gamma*self.values[m-1][n-1]) + (1-self.belief_sell(i + second_best_ask) * self.gamma * self.values[m][n-1])\r\n if thing > best_return:\r\n best_return = thing\r\n best_ask = i + second_best_ask\r\n\r\n return best_ask\r\n\r\n def belief_sell(self, price):\r\n accepted_asks_greater = 0\r\n bids_greater = 0\r\n unaccepted_asks_lower = 0\r\n for p in self.accepted_asks:\r\n if p >= price:\r\n accepted_asks_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_bids]:\r\n if p >= price:\r\n bids_greater += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n unaccepted_asks_lower += 1\r\n\r\n if accepted_asks_greater + bids_greater + unaccepted_asks_lower == 0:\r\n return 0\r\n return (accepted_asks_greater + bids_greater) / (accepted_asks_greater + bids_greater + unaccepted_asks_lower)\r\n\r\n def belief_buy(self, price):\r\n accepted_bids_lower = 0\r\n asks_lower = 0\r\n unaccepted_bids_greater = 0\r\n for p in self.accepted_bids:\r\n if p <= price:\r\n accepted_bids_lower += 1\r\n for p in [thing[0] for thing in self.outstanding_asks]:\r\n if p <= price:\r\n asks_lower += 1\r\n for p in [thing[0] for thing in self.outstanding_bids]:\r\n if p >= price:\r\n unaccepted_bids_greater += 1\r\n if accepted_bids_lower + asks_lower + unaccepted_bids_greater == 0:\r\n return 0\r\n return (accepted_bids_lower + asks_lower) / (accepted_bids_lower + asks_lower + 
unaccepted_bids_greater)\r\n\r\n def respond(self, time, lob, trade, verbose):\r\n\r\n if (self.last_lob == None):\r\n self.last_lob = lob\r\n else:\r\n self.calc_es(lob, self.m, verbose)\r\n self.calc_ds(lob, self.m, verbose)\r\n self.calc_bids_volume(lob, self.m, verbose)\r\n self.calc_asks_volume(lob, self.m, verbose)\r\n self.last_lob = lob;\r\n\r\n # what, if anything, has happened on the bid LOB?\r\n self.outstanding_bids = lob['bids']['lob']\r\n bid_improved = False\r\n bid_hit = False\r\n lob_best_bid_p = lob['bids']['bestp']\r\n lob_best_bid_q = None\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n lob_best_bid_q = lob['bids']['lob'][-1][1]\r\n if self.prev_best_bid_p < lob_best_bid_p :\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and ((self.prev_best_bid_p > lob_best_bid_p) or ((self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > lob_best_bid_q))):\r\n # previous best bid was hit\r\n self.accepted_bids.append(self.prev_best_bid_p)\r\n bid_hit = True\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB has been emptied: was it cancelled or hit?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n bid_hit = False\r\n else:\r\n bid_hit = True\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n self.outstanding_asks = lob['asks']['lob']\r\n ask_improved = False\r\n ask_lifted = False\r\n lob_best_ask_p = lob['asks']['bestp']\r\n lob_best_ask_q = None\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p :\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and ((self.prev_best_ask_p < lob_best_ask_p) or ((self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q))):\r\n # trade happened and best ask price has got 
worse, or stayed same but quantity reduced -- assume previous best ask was lifted\r\n self.accepted_asks.append(self.prev_best_ask_p)\r\n ask_lifted = True\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: canceled or lifted?\r\n last_tape_item = lob['tape'][-1]\r\n if last_tape_item['type'] == 'Cancel' :\r\n ask_lifted = False\r\n else:\r\n ask_lifted = True\r\n\r\n\r\n #populate expected values\r\n if self.first_turn:\r\n # print \"populating\"\r\n self.first_turn = False\r\n for n in range(1, self.remaining_offer_ops):\r\n for m in range(1, self.holdings):\r\n if self.job == 'Bid':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_bid(m, n)\r\n\r\n if self.job == 'Ask':\r\n #BUYER\r\n self.values[m][n] = self.calc_p_ask(m, n)\r\n # print \"done\"\r\n\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n" }, { "alpha_fraction": 0.47004538774490356, "alphanum_fraction": 0.4791981875896454, "avg_line_length": 36.99705123901367, "blob_id": "16edf6058ae54d78e6d90086ead09ced171b10c6", "content_id": "d993dc096eb266b69e22ac725565b0eae0dc1688", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26440, "license_type": "permissive", "max_line_length": 141, "num_lines": 678, "path": "/ZhenZhang/source/IZIP_MLOFI.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\nfrom BSE2_msg_classes import Assignment, Order, Exch_msg\r\nfrom BSE_trader_agents import Trader;\r\nimport random\r\nimport math\r\n\r\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\r\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies\r\n\r\nclass Trader_IZIP_MLOFI(Trader):\r\n\r\n # ZIP init key param-values are 
those used in Cliff's 1997 original HP Labs tech report\r\n # NB this implementation keeps separate margin values for buying & selling,\r\n # so a single trader can both buy AND sell\r\n # -- in the original, traders were either buyers OR sellers\r\n\r\n def __init__(self, ttype, tid, balance, time, m):\r\n Trader.__init__(self, ttype, tid, balance, time)\r\n m_fix = 0.05\r\n m_var = 0.05\r\n self.job = None # this is 'Bid' or 'Ask' depending on customer order\r\n self.active = False # gets switched to True while actively working an order\r\n self.prev_change = 0 # this was called last_d in Cliff'97\r\n self.beta = 0.1 + 0.2 * random.random() # learning rate\r\n self.momntm = 0.3 * random.random() # momentum\r\n self.ca = 0.10 # self.ca & .cr were hard-coded in '97 but parameterised later\r\n self.cr = 0.10\r\n self.margin = None # this was called profit in Cliff'97\r\n self.margin_buy = -1.0 * (m_fix + m_var * random.random())\r\n self.margin_sell = m_fix + m_var * random.random()\r\n self.price = None\r\n self.limit = None\r\n # memory of best price & quantity of best bid and ask, on LOB on previous update\r\n self.prev_best_bid_p = None\r\n self.prev_best_bid_q = None\r\n self.prev_best_ask_p = None\r\n self.prev_best_ask_q = None\r\n # memory of worst prices from customer orders received so far\r\n self.worst_bidprice = None\r\n self.worst_askprice = None\r\n\r\n\r\n # variable for MLOFI\r\n self.last_lob = None;\r\n self.es_list = [];\r\n self.ds_list = [];\r\n\r\n #variable for ratio\r\n self.bids_volume_list = []\r\n self.asks_volume_list = []\r\n\r\n #variable\r\n self.m = m;\r\n\r\n def is_imbalance_significant(self, m,threshold):\r\n cb_list = [0 for i in range(m)]\r\n ab_list = []\r\n\r\n ca_list = [0 for i in range(m)]\r\n aa_list = []\r\n\r\n n = 1\r\n\r\n while len(self.bids_volume_list) >= n and len(self.asks_volume_list) >= n:\r\n for i in range(m):\r\n cb_list[i] += self.bids_volume_list[-n]['level' + str(i + 1)]\r\n ca_list[i] += 
self.asks_volume_list[-n]['level' + str(i + 1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n\r\n for i in range(m):\r\n temp1 = None\r\n temp2 = None\r\n if n == 1:\r\n temp1 = cb_list[i] + 1\r\n temp2 = ca_list[i] + 1\r\n else:\r\n temp1 = cb_list[i] / (n - 1) + 1\r\n temp2 = ca_list[i] / (n - 1) + 1\r\n ab_list.append(temp1)\r\n aa_list.append(temp2)\r\n\r\n v_bid = 0;\r\n v_ask = 0;\r\n for i in range(m):\r\n v_bid += math.exp(-0.5*i)*ab_list[i];\r\n v_ask += math.exp(-0.5*i)*aa_list[i];\r\n ratio = (v_bid-v_ask)/(v_bid+v_ask);\r\n\r\n # print self.bids_volume_list\r\n # print self.asks_volume_list\r\n # print ratio\r\n\r\n if(ratio>threshold or ratio<-threshold):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n def calc_bids_volume(self, lob, m, verbose):\r\n new_b = {}\r\n\r\n for i in range(1, m + 1):\r\n new_b['level' + str(i)] = self.cal_bids_n(lob, i)\r\n\r\n self.bids_volume_list.append(new_b)\r\n\r\n def cal_bids_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n return r_n\r\n\r\n def calc_asks_volume(self, lob, m, verbose):\r\n new_a = {}\r\n\r\n for i in range(1, m + 1):\r\n new_a['level' + str(i)] = self.cal_asks_n(lob, i);\r\n\r\n self.asks_volume_list.append(new_a)\r\n\r\n def cal_asks_n(self, lob, n):\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return q_n\r\n\r\n def calc_level_n_e(self, current_lob, n):\r\n b_n = 0\r\n r_n = 0\r\n a_n = 0\r\n q_n = 0\r\n\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n\r\n if (len(current_lob['bids']['lob']) < n):\r\n b_n = 0\r\n r_n = 0\r\n else:\r\n b_n = current_lob['bids']['lob'][n - 1][0]\r\n r_n = current_lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['bids']['lob']) < n):\r\n b_n_1 = 0\r\n r_n_1 = 0\r\n else:\r\n b_n_1 = self.last_lob['bids']['lob'][n - 1][0]\r\n r_n_1 = self.last_lob['bids']['lob'][n - 1][1]\r\n\r\n if 
(len(current_lob['asks']['lob']) < n):\r\n a_n = 0\r\n q_n = 0\r\n else:\r\n a_n = current_lob['asks']['lob'][n - 1][0]\r\n q_n = current_lob['asks']['lob'][n - 1][1]\r\n\r\n if (len(self.last_lob['asks']['lob']) < n):\r\n a_n_1 = 0\r\n q_n_1 = 0\r\n else:\r\n a_n_1 = self.last_lob['asks']['lob'][n - 1][0]\r\n q_n_1 = self.last_lob['asks']['lob'][n - 1][1]\r\n\r\n delta_w = 0;\r\n\r\n if (b_n > b_n_1):\r\n delta_w = r_n\r\n elif (b_n == b_n_1):\r\n delta_w = r_n - r_n_1\r\n else:\r\n delta_w = -r_n_1\r\n\r\n delta_v = 0\r\n if (a_n > a_n_1):\r\n delta_v = -q_n_1\r\n elif (a_n == a_n_1):\r\n delta_v = q_n - q_n_1\r\n else:\r\n delta_v = q_n\r\n\r\n return delta_w - delta_v\r\n\r\n def calc_es(self, lob, m, verbose):\r\n new_e = {}\r\n for i in range(1, m + 1):\r\n new_e['level' + str(i)] = self.calc_level_n_e(lob, i)\r\n\r\n self.es_list.append(new_e)\r\n\r\n def calc_ds(self, lob, m, verbose):\r\n new_d = {}\r\n\r\n for i in range(1, m + 1):\r\n new_d['level' + str(i)] = self.cal_depth_n(lob, i)\r\n\r\n self.ds_list.append(new_d)\r\n\r\n def cal_depth_n(self, lob, n):\r\n\r\n if (len(lob['bids']['lob']) < n):\r\n r_n = 0\r\n else:\r\n r_n = lob['bids']['lob'][n - 1][1]\r\n\r\n if (len(lob['asks']['lob']) < n):\r\n q_n = 0\r\n else:\r\n q_n = lob['asks']['lob'][n - 1][1]\r\n return (r_n + q_n) / 2\r\n\r\n\r\n\r\n def __str__(self):\r\n s = '%s, job=, %s, ' % (self.tid, self.job)\r\n if self.active == True:\r\n s = s + 'actv=,T, '\r\n else:\r\n s = s + 'actv=,F, '\r\n if self.margin == None:\r\n s = s + 'mrgn=,N, '\r\n else:\r\n s = s + 'mrgn=,%5.2f, ' % self.margin\r\n s = s + 'lmt=,%s, price=,%s, bestbid=,%s,@,%s, bestask=,%s,@,%s, wrstbid=,%s, wrstask=,%s' % \\\r\n (self.limit, self.price, self.prev_best_bid_q, self.prev_best_bid_p, self.prev_best_ask_q,\r\n self.prev_best_ask_p, self.worst_bidprice, self.worst_askprice)\r\n return (s)\r\n\r\n def getorder(self, time, countdown, lob, verbose):\r\n\r\n if verbose: print('ZIP getorder(): LOB=%s' % lob)\r\n\r\n # 
random coefficient, multiplier on trader's own estimate of worst possible bid/ask prices\r\n # currently in arbitrarily chosen range [2, 5]\r\n worst_coeff = 2 + (3 * random.random())\r\n\r\n if len(self.orders) < 1:\r\n self.active = False\r\n order = None\r\n else:\r\n self.active = True\r\n self.limit = self.orders[0].price\r\n self.job = self.orders[0].atype\r\n if self.job == 'Bid':\r\n # currently a buyer (working a bid order)\r\n self.margin = self.margin_buy\r\n # what is the worst bid price on the LOB right now?\r\n if len(lob['bids']['lob']) > 0:\r\n # take price of final entry on LOB\r\n worst_bid = lob['bids']['lob'][-1][0]\r\n else:\r\n # local pessimistic estimate of the worst bid price (own version of stub quote)\r\n worst_bid = max(1, int(self.limit / worst_coeff))\r\n if self.worst_bidprice == None:\r\n self.worst_bidprice = worst_bid\r\n elif self.worst_bidprice > worst_bid:\r\n self.worst_bidprice = worst_bid\r\n else:\r\n # currently a seller (working a sell order)\r\n self.margin = self.margin_sell\r\n # what is the worst ask price on the LOB right now?\r\n if len(lob['asks']['lob']) > 0:\r\n # take price of final entry on LOB\r\n worst_ask = lob['asks']['lob'][-1][0]\r\n else:\r\n # local pessimistic estimate of the worst ask price (own version of stub quote)\r\n worst_ask = int(self.limit * worst_coeff)\r\n if self.worst_askprice == None:\r\n self.worst_askprice = worst_ask\r\n elif self.worst_askprice < worst_ask:\r\n self.worst_askprice = worst_ask\r\n\r\n quoteprice = int(self.limit * (1 + self.margin))\r\n\r\n def imbalance_alter(quoteprice_aa, lob, countdown, m):\r\n\r\n mlofi_list = [0 for i in range(m)]\r\n cd_list = [0 for i in range(m)]\r\n ad_list = []\r\n n = 1\r\n\r\n while len(self.es_list) >= n:\r\n for i in range(m):\r\n mlofi_list[i] += self.es_list[-n]['level' + str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n n = 1\r\n\r\n while len(self.ds_list) >= n:\r\n for i in range(m):\r\n cd_list[i] += self.ds_list[-n]['level' + 
str(i+1)]\r\n n += 1\r\n if n >= 11:\r\n break\r\n\r\n for i in range(m):\r\n temp = None\r\n if n == 1:\r\n temp = cd_list[i]+1\r\n else:\r\n temp = cd_list[i]/(n-1)+1\r\n ad_list.append(temp)\r\n\r\n c = 10\r\n decay = 1\r\n offset = 0\r\n\r\n for i in range(m):\r\n offset += int(mlofi_list[i]*c*pow(decay,i)/ ad_list[i])\r\n\r\n\r\n benchmark = quoteprice_aa;\r\n if(lob['midprice'] != None):\r\n benchmark = lob['midprice']\r\n # print 'midprice is %d' % benchmark\r\n\r\n quoteprice_iaa = quoteprice_aa + 0.8 * (benchmark + offset - quoteprice_aa)\r\n if self.job == 'Bid' and quoteprice_iaa > self.limit:\r\n quoteprice_iaa = self.limit\r\n if self.job == 'Ask' and quoteprice_iaa < self.limit:\r\n quoteprice_iaa = self.limit\r\n\r\n if countdown < 0.3 :\r\n print \"insert\"\r\n if self.job == 'Bid' and (len(lob['asks']['lob']) >= 1) and lob['asks']['lob'][0][0] < self.limit:\r\n quoteprice_iaa = lob['asks']['lob'][0][0]+1\r\n if self.job == 'Ask' and (len(lob['bids']['lob']) >= 1) and lob['bids']['lob'][0][0] > self.limit:\r\n quoteprice_iaa = lob['bids']['lob'][0][0]-1\r\n\r\n\r\n if self.job == 'Bid' and quoteprice_iaa < bse_sys_minprice:\r\n quoteprice_iaa = bse_sys_minprice + 1\r\n if self.job == 'Ask' and quoteprice_iaa > bse_sys_maxprice:\r\n quoteprice_iaa = bse_sys_maxprice - 1\r\n\r\n return quoteprice_iaa\r\n\r\n # print \"before\"\r\n # print quoteprice\r\n # if(self.is_imbalance_significant(self.m,0.6)):\r\n # print \"abvious\"\r\n # quoteprice_izip = imbalance_alter(quoteprice, lob, countdown, self.m)\r\n # else:\r\n # print \"not abvious\"\r\n # quoteprice_izip = quoteprice\r\n quoteprice_izip = imbalance_alter(quoteprice, lob, countdown, self.m)\r\n # print \"after\"\r\n # print quoteprice_izip\r\n\r\n\r\n\r\n self.price = quoteprice_izip\r\n\r\n order = Order(self.tid, self.job, \"LIM\", quoteprice_izip, self.orders[0].qty, time, None, -1)\r\n self.lastquote = order\r\n\r\n return order\r\n\r\n # update margin on basis of what happened in market\r\n 
def respond(self, time, lob, trade, verbose):\r\n # ZIP trader responds to market events, altering its margin\r\n # does this whether it currently has an order to work or not\r\n\r\n\r\n if (self.last_lob == None):\r\n self.last_lob = lob\r\n else:\r\n self.calc_es(lob, self.m, verbose)\r\n self.calc_ds(lob, self.m, verbose)\r\n self.calc_bids_volume(lob, self.m, verbose)\r\n self.calc_asks_volume(lob, self.m, verbose)\r\n self.last_lob = lob;\r\n\r\n def target_up(price):\r\n # generate a higher target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 + (self.cr * random.random())) # relative shift\r\n target = int(round(ptrb_rel + ptrb_abs, 0))\r\n if target == price: target = price + 1 # enforce minimal difference\r\n # print('TargetUp: %d %d\\n' % (price, target))\r\n return (target)\r\n\r\n def target_down(price):\r\n # generate a lower target price by randomly perturbing given price\r\n ptrb_abs = self.ca * random.random() # absolute shift\r\n ptrb_rel = price * (1.0 - (self.cr * random.random())) # relative shift\r\n target = int(round(ptrb_rel - ptrb_abs, 0))\r\n if target == price: target = price - 1 # enforce minimal difference\r\n # print('TargetDn: %d %d\\n' % (price,target))\r\n return (target)\r\n\r\n def microshade(microprice, price):\r\n # shade in the direction of the microprice\r\n microweight = 0\r\n if microprice != None:\r\n shaded = ((microweight * microprice) + ((1 - microweight) * price))\r\n else:\r\n shaded = price\r\n # print('Microshade: micro=%s price=%s shaded=%s' % (microprice, price, shaded))\r\n return (shaded)\r\n\r\n def willing_to_trade(price):\r\n # am I willing to trade at this price?\r\n willing = False\r\n if self.job == 'Bid' and self.active and self.price >= price:\r\n willing = True\r\n if self.job == 'Ask' and self.active and self.price <= price:\r\n willing = True\r\n return willing\r\n\r\n def profit_alter(*argv):\r\n # this has variable number 
of parameters\r\n # if passed a single numeric value, that's the target price\r\n # if passed three numeric values, that's the price, beta (learning rate), and momentum\r\n if len(argv) == 1:\r\n price = argv[0]\r\n beta = self.beta\r\n momntm = self.momntm\r\n elif len(argv) == 3:\r\n price = argv[0]\r\n beta = argv[1]\r\n momntm = argv[2]\r\n else:\r\n sys.stdout.flush()\r\n sys.exit('Fail: ZIP profit_alter given wrong number of parameters')\r\n\r\n # print('profit_alter: price=%s beta=%s momntm=%s' % (price, beta, momntm))\r\n oldprice = self.price\r\n diff = price - oldprice\r\n change = ((1.0 - self.momntm) * (self.beta * diff)) + (self.momntm * self.prev_change)\r\n self.prev_change = change\r\n newmargin = ((self.price + change) / self.limit) - 1.0\r\n\r\n if self.job == 'Bid':\r\n margin = min(newmargin, 0)\r\n self.margin_buy = margin\r\n self.margin = margin\r\n else:\r\n margin = max(0, newmargin)\r\n self.margin_sell = margin\r\n self.margin = margin\r\n\r\n # set the price from limit and profit-margin\r\n self.price = int(round(self.limit * (1.0 + self.margin), 0))\r\n # print('old=%d diff=%d change=%d lim=%d price = %d\\n' % (oldprice, diff, change, self.limit, self.price))\r\n\r\n if verbose and trade != None: print('respond() [ZIP] time=%s tid=%s, trade=%s LOB[bids]=%s LOB[asks]=%s' %\r\n (time, self.tid, trade, lob[\"bids\"], lob[\"asks\"]))\r\n\r\n # what, if anything, has happened on the bid LOB?\r\n\r\n # if trade != None: print('ZIP respond() trade=%s' % trade)\r\n\r\n bid_improved = False\r\n bid_hit = False\r\n\r\n if len(lob['bids']['lob']) > 0:\r\n lob_best_bid_p = lob['bids']['lob'][0][0]\r\n else:\r\n lob_best_bid_p = None\r\n\r\n lob_best_bid_q = None # default assumption\r\n\r\n if lob_best_bid_p != None:\r\n # non-empty bid LOB\r\n\r\n if self.prev_best_bid_p > lob_best_bid_p:\r\n best_bid_p_decreased = True\r\n else:\r\n best_bid_p_decreased = False\r\n\r\n if (self.prev_best_bid_p == lob_best_bid_p) and (self.prev_best_bid_q > 
lob_best_bid_q):\r\n same_p_smaller_q = True\r\n else:\r\n same_p_smaller_q = False\r\n\r\n lob_best_bid_q = lob['bids']['lob'][0][1]\r\n\r\n if self.prev_best_bid_p < lob_best_bid_p:\r\n # best bid has improved\r\n # NB doesn't check if the improvement was by self\r\n bid_improved = True\r\n elif trade != None and (best_bid_p_decreased or same_p_smaller_q):\r\n # there WAS a trade and either...\r\n # ... (best bid price has gone DOWN) or (best bid price is same but quantity at that price has gone DOWN)\r\n # then assume previous best bid was hit\r\n bid_hit = True\r\n\r\n elif self.prev_best_bid_p != None:\r\n # the bid LOB is empty now but was not previously: so was it canceled or lifted?\r\n if trade != None:\r\n # a trade has occurred and the previously nonempty ask LOB is now empty\r\n # so assume best ask was lifted\r\n bid_hit = True\r\n else:\r\n bid_hit = False\r\n\r\n if verbose: print(\"LOB[bids]=%s bid_improved=%s bid_hit=%s\" % (lob['bids'], bid_improved, bid_hit))\r\n\r\n # what, if anything, has happened on the ask LOB?\r\n\r\n ask_improved = False\r\n ask_lifted = False\r\n\r\n if len(lob['asks']['lob']) > 0:\r\n lob_best_ask_p = lob['asks']['lob'][0][0]\r\n else:\r\n lob_best_ask_p = None\r\n\r\n lob_best_ask_q = None\r\n\r\n if lob_best_ask_p != None:\r\n # non-empty ask LOB\r\n\r\n if self.prev_best_ask_p < lob_best_ask_p:\r\n best_ask_p_increased = True\r\n else:\r\n best_ask_p_increased = False\r\n\r\n if (self.prev_best_ask_p == lob_best_ask_p) and (self.prev_best_ask_q > lob_best_ask_q):\r\n same_p_smaller_q = True\r\n else:\r\n same_p_smaller_q = False\r\n\r\n lob_best_ask_q = lob['asks']['lob'][0][1]\r\n if self.prev_best_ask_p > lob_best_ask_p:\r\n # best ask has improved -- NB doesn't check if the improvement was by self\r\n ask_improved = True\r\n elif trade != None and (best_ask_p_increased or same_p_smaller_q):\r\n # trade happened and best ask price has got worse, or stayed same but quantity reduced -- assume previous best ask was 
lifted\r\n ask_lifted = True\r\n\r\n elif self.prev_best_ask_p != None:\r\n # the ask LOB is empty now but was not previously: so was it canceled or lifted?\r\n if trade != None:\r\n # a trade has occurred and the previously nonempty ask LOB is now empty\r\n # so assume best ask was lifted\r\n ask_lifted = True\r\n else:\r\n ask_lifted = False\r\n\r\n if verbose: print(\"LOB[asks]=%s ask_improved=%s ask_lifted=%s\" % (lob['asks'], ask_improved, ask_lifted))\r\n\r\n if verbose and (bid_improved or bid_hit or ask_improved or ask_lifted):\r\n print('ZIP respond() B_improved=%s; B_hit=%s A_improved=%s, A_lifted=%s' % (\r\n bid_improved, bid_hit, ask_improved, ask_lifted))\r\n print('Trade=%s\\n' % trade)\r\n\r\n # we want to know: did a deal just happen?\r\n # if not, did the most recent bid\r\n\r\n deal = bid_hit or ask_lifted\r\n\r\n # previously...\r\n # when raising margin, tradeprice = trade['price'], targetprice = f(tradeprice) &\r\n # i.e. target price will be calculated relative to price of most recent transaction\r\n # and when lowering margin, targetprice = f(best_price_on_counterparty_side_of_LOB) or\r\n # or if LOB empty then targetprice = f(worst possible counterparty quote) <-- a system constant\r\n\r\n # new in this version:\r\n # take account of LOB's microprice if it is defined (if not, use trade['price'] as before)\r\n\r\n midp = lob['midprice']\r\n microp = lob['microprice']\r\n\r\n # KLUDGE for TESTING\r\n if time > 79: microp = 145\r\n\r\n if microp != None and midp != None:\r\n imbalance = microp - midp\r\n else:\r\n imbalance = 0 # uses zero instead of None because a zero imbalance reverts ZIP to original form\r\n\r\n target_price = None # default assumption\r\n\r\n # print('self.job=%s' % self.job)\r\n\r\n if self.job == 'Ask':\r\n # seller\r\n if deal:\r\n if verbose: print ('trade', trade)\r\n tradeprice = trade['price'] # price of most recent transaction\r\n # print('tradeprice=%s lob[microprice]=%s' % (tradeprice, lob['microprice']))\r\n # 
shadetrade = microshade(lob['microprice'], tradeprice)\r\n # refprice = shadetrade\r\n refprice = tradeprice\r\n\r\n if self.price <= tradeprice:\r\n # could sell for more? raise margin\r\n target_price = target_up(refprice)\r\n profit_alter(target_price)\r\n elif ask_lifted and self.active and not willing_to_trade(tradeprice):\r\n # previous best ask was hit,\r\n # but this trader wouldn't have got the deal cos price to high,\r\n # and still working a customer order, so reduce margin\r\n target_price = target_down(refprice)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for a target price higher than best bid\r\n # print('lob_best_bid_p=%s lob[microprice]=%s' % (lob_best_bid_p, lob['microprice']))\r\n # refprice = microshade(lob['microprice'], lob_best_bid_p)\r\n refprice = lob_best_bid_p\r\n\r\n if ask_improved and self.price > lob_best_bid_p:\r\n if lob_best_bid_p != None:\r\n target_price = target_up(lob_best_bid_p)\r\n else:\r\n if self.worst_askprice != None:\r\n target_price = self.worst_askprice\r\n print('worst_askprice = %s' % self.worst_askprice)\r\n target_price = None # todo: does this stop the price-spikes?\r\n else:\r\n target_price = None\r\n # target_price = lob['asks']['worstp'] # stub quote\r\n if target_price != None:\r\n print('PA1: tp=%s' % target_price)\r\n profit_alter(target_price)\r\n\r\n if self.job == 'Bid':\r\n # buyer\r\n if deal:\r\n tradeprice = trade['price']\r\n # shadetrade = microshade(lob['microprice'], tradeprice)\r\n # refprice = shadetrade\r\n refprice = tradeprice\r\n\r\n if lob['microprice'] != None and lob['midprice'] != None:\r\n delta = lob['microprice'] - lob['midprice']\r\n # refprice = refprice + delta\r\n\r\n if self.price >= tradeprice:\r\n # could buy for less? raise margin (i.e. 
cut the price)\r\n target_price = target_down(refprice)\r\n profit_alter(target_price)\r\n elif bid_hit and self.active and not willing_to_trade(tradeprice):\r\n # wouldn't have got this deal, and still working a customer order,\r\n # so reduce margin\r\n target_price = target_up(refprice)\r\n profit_alter(target_price)\r\n else:\r\n # no deal: aim for target price lower than best ask\r\n refprice = microshade(lob['microprice'], lob_best_ask_p)\r\n if bid_improved and self.price < lob_best_ask_p:\r\n if lob_best_ask_p != None:\r\n target_price = target_down(lob_best_ask_p)\r\n else:\r\n if self.worst_bidprice != None:\r\n target_price = self.worst_bidprice\r\n target_price = None\r\n else:\r\n target_price = None\r\n # target_price = lob['bids']['worstp'] # stub quote\r\n if target_price != None:\r\n # print('PA2: tp=%s' % target_price)\r\n profit_alter(target_price)\r\n\r\n # print('time,%f,>>>,microprice,%s,>>>,target_price,%s' % (time, lob['microprice'], target_price))\r\n\r\n # remember the best LOB data ready for next response\r\n self.prev_best_bid_p = lob_best_bid_p\r\n self.prev_best_bid_q = lob_best_bid_q\r\n self.prev_best_ask_p = lob_best_ask_p\r\n self.prev_best_ask_q = lob_best_ask_q\r\n\r\n##########################---trader-types have all been defined now--################" }, { "alpha_fraction": 0.46308261156082153, "alphanum_fraction": 0.4741261899471283, "avg_line_length": 49.83528137207031, "blob_id": "51bb0d2b967d2b5e6b66e6f1b207014aa8f74917", "content_id": "6c495e4aeebc466e071eb4db5a267cbe473ee9d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104314, "license_type": "permissive", "max_line_length": 250, "num_lines": 2052, "path": "/ZhenZhang/source/BSE2.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#\n# BSE: The Bristol Stock Exchange\n#\n# Version 2.0Beta: Nov 20th, 2018.\n# Version 1.4: August 30th, 2018.\n# 
Version 1.3: July 21st, 2018.\n# Version 1.2: November 17th, 2012.\n#\n# Copyright (c) 2012-2019, Dave Cliff\n#\n#\n# ------------------------\n#\n# MIT Open-Source License:\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial\n# portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# ------------------------\n#\n#\n#\n# BSE is a very simple simulation of automated execution traders\n# operating on a very simple model of a limit order book (LOB) exchange\n#\n# major simplifications in this version:\n# (a) only one financial instrument being traded\n# (b) each trader can have max of one order per single orderbook.\n# (c) simply processes each order in sequence and republishes LOB to all traders\n# => no issues with exchange processing latency/delays or simultaneously issued orders.\n#\n# NB this code has been written to be readable/intelligible, not efficient!\n\n# could import pylab here for graphing etc\n\nimport sys\nimport math\nimport random\nimport csv\nfrom datetime import datetime\n\nfrom BSE2_msg_classes import 
Assignment, Order, Exch_msg\nfrom BSE_trader_agents import Trader_ISHV, Trader_Shaver,Trader_Giveaway,Trader_AA, Trader_Sniper, Trader_ZIC,Trader_ZIP,Trader_OAA #, Trader_IAAB\nfrom IZIP_MLOFI import Trader_IZIP_MLOFI\nfrom IAA_MLOFI import Trader_IAA_MLOFI\nfrom Simple_MLOFI import Trader_Simple_MLOFI\nfrom GDX import Trader_GDX\nfrom IGDX_MLOFI import Trader_IGDX_MLOFI\nfrom IAA_NEW import Trader_IAA_NEW\nfrom ZZISHV import Trader_ZZISHV\n\n# from BSE2_unittests import test_all\n# from BSE2_dev import proc_OXO proc_ICE\n\n\nbse_sys_minprice = 1 # minimum price in the system, in cents/pennies\nbse_sys_maxprice = 200 # maximum price in the system, in cents/pennies: Todo -- eliminate reliance on this\nticksize = 1 # minimum change in price, in cents/pennies\n\n\n\n# Orderbook_half is one side of the book:\n# The internal records of the exchange include the ID of the trader who issued the order, arrival time, etc.\n# The externally published LOB aggregates and anonymizes these details.\n\nclass Orderbook_half:\n\n def __init__(self, booktype, worstprice):\n\n self.booktype = booktype\n\n def bid_equaltoorbetterthan(p1, p2, verbose):\n if verbose: print(\"bid_equaltoorbetterthan: %d >= %d ?\" % (p1, p2))\n if p1 >= p2: return(True)\n else: return(False)\n\n def ask_equaltoorbetterthan(p1, p2, verbose):\n if verbose: print(\"ask_equaltoorbetterthan: %d <= %d ?\" % (p1, p2))\n if p1 <= p2: return(True)\n else: return(False)\n\n # function for deciding whether price A is equal to or better than price B\n if self.booktype == 'Bid':\n self.equaltoorbetterthan = bid_equaltoorbetterthan\n elif self.booktype == 'Ask':\n self.equaltoorbetterthan = ask_equaltoorbetterthan\n else: sys.exit('Fail: Orderbook_half __init__ passed booktype=%s', str(booktype))\n\n # dictionary of live orders received, indexed by Order ID\n self.orders = {}\n # limit order book, exchange's internal list, ordered by price, with associated order info\n self.lob = []\n # anonymized LOB, aggregated list 
with only price/qty info: as published to market observers\n self.lob_anon = []\n # list of orders \"resting\" at the exchange, i.e. orders that persist for some time (e.g. AON, ICE)\n self.resting = []\n # On-Close & On-Open hold LIM & MKT orders that execute at market open and close (MOO, MOC, LOO, LOC)\n self.on_close = []\n self.on_open = []\n # OXO stores details of \"other\" for OSO and OCO orders\n self.oxo = []\n # summary stats\n # self.best_price = None\n self.worst_price = worstprice\n # self.n_orders = 0 # how many orders?\n # self.lob_depth = 0 # how many different prices on lob?\n\n\n def __str__(self):\n v = 'OB_H> '\n s = '\\n' + v + self.booktype + '\\n'\n s = s + v + 'Orders: '\n for oid in self.orders:\n s = s + str(oid) + '=' + str(self.orders[oid]) + ' '\n s = s + '\\n'\n s = s + v + 'LOB:\\n'\n for row in self.lob:\n s = s + '[P=%d,[' % row[0] # price\n for order in row[1]:\n s = s + '[T=%5.2f Q=%d %s OID:%d]' % (order[0], order[1], order[2], order[3])\n s = s + ']]\\n'\n s = s + v + 'LOB_anon' + str(self.lob_anon) + '\\n'\n s = s + v + 'MOB:'\n s = s + '\\n'\n\n return s\n\n\n def anonymize_lob(self, verbose):\n # anonymize a lob, strip out order details, format as a sorted list\n # sorting is best prices at the front (LHS) of the list\n self.lob_anon = []\n if self.booktype == 'Bid':\n for price in sorted(self.lob, reverse=True):\n qty = self.lob[price][0]\n self.lob_anon.append([price, qty])\n elif self.booktype == 'Ask':\n for price in sorted(self.lob):\n qty = self.lob[price][0]\n self.lob_anon.append([price, qty])\n else:\n sys.exit('Fail: Orderbook_half __init__ passed booktype=%s', str(booktype))\n if verbose: print self.lob_anon\n\n\n def build_lob(self, verbose):\n # take a list of orders and build a limit-order-book (lob) from it\n # NB the exchange needs to know arrival times and trader-id associated with each order\n # returns lob as a list, sorted by price best to worst, orders at same price sorted by arrival time\n # also builds 
aggregated & anonymized version (just price/quantity, sorted, as a list) for publishing to traders\n\n # First builds lob as a dictionary indexed by price\n lob = {}\n for oid in self.orders:\n order = self.orders.get(oid)\n price = int(order.price)\n if price in lob:\n # update existing entry\n qty = lob[price][0]\n orderlist = lob[price][1]\n orderlist.append([order.time, order.qty, order.tid, order.orderid])\n lob[price] = [qty + order.qty, orderlist]\n else:\n # create a new dictionary entry\n lob[price] = [order.qty, [[order.time, order.qty, order.tid, order.orderid]]]\n\n self.lob = []\n for price in lob:\n orderlist = lob[price][1]\n orderlist.sort() #orders are sorted by arrival time\n self.lob.append([price, orderlist]) #appends only the price and the order-list\n # now sort by price: order depends on book type\n if self.booktype == 'Bid':\n self.lob.sort(reverse=True)\n elif self.booktype == 'Ask':\n self.lob.sort()\n else:\n sys.exit('Fail: Orderbook_half __init__ passed booktype=%s', str(booktype))\n\n # create anonymized version of LOB for publication\n self.lob_anon = []\n if self.booktype == 'Bid':\n for price in sorted(lob, reverse=True):\n qty = lob[price][0]\n self.lob_anon.append([price, qty])\n else:\n for price in sorted(lob):\n qty = lob[price][0]\n self.lob_anon.append([price, qty])\n\n if verbose: print self.lob_anon\n\n # record best price and associated trader-id\n if len(self.lob) > 0 :\n if self.booktype == 'Bid':\n self.best_price = self.lob_anon[-1][0] #assumes reverse order COME BACK HERE\n else :\n self.best_price = self.lob_anon[0][0]\n else :\n self.best_price = None\n\n if verbose: print self.lob\n\n\n def book_add(self, order, verbose):\n # add an order to the master list holding the orders\n if verbose: print('>book_add %s' % (order))\n self.orders[order.orderid] = order\n self.n_orders = len(self.orders)\n # reconstruct the LOB -- from scratch (inefficient)\n self.build_lob(verbose)\n return None #null response\n\n\n def 
book_CAN(self, time, order, pool_id, verbose):\n # delete (CANcel) an order from the dictionary holding the orders\n\n def add_tapeitem(eventlist, pool_id, time, oid, otype, qty, verbose):\n # add_tapeitem(): add an event to list of events that will be written to tape\n tape_event = {'pool_id':pool_id, 'type':'CAN', 'time':time, 'oid':oid, 'otype':otype, 'o_qty':qty}\n eventlist.append(tape_event)\n if verbose: print('book_CAN.add_tapeitem() trans_event=%s' % tape_event)\n\n tape_events=[]\n\n if verbose:\n print('>OrderbookHalf.book_CAN %s' % order)\n for ord in self.orders: print(\"{%s: %s}\" % (ord,str(self.orders[ord])))\n\n oid = order.orderid\n if len(self.orders)>0 and (self.orders.get(oid) != None) :\n if verbose: print('Deleting order %s' % oid)\n o_qty = self.orders[oid].qty\n o_type = self.booktype\n del(self.orders[oid])\n self.n_orders = len(self.orders)\n # reconstruct the LOB -- from scratch (inefficient)\n self.build_lob(verbose)\n if verbose: print('<book_CAN %s' % self.orders)\n\n tmsg = Exch_msg(order.tid, oid, \"CAN\", [], None, 0, 0)\n add_tapeitem(tape_events, pool_id, time, oid, o_type, o_qty, verbose)\n\n return {\"TraderMsgs\":[tmsg], \"TapeEvents\":tape_events}\n else:\n print oid\n print 'NOP' # no operation -- order ID not in the order dictionary\n sys.exit('Fail: book_CAN() attempts to delete nonexistent order ')\n\n\n def book_take(self, time, order, pool_id, verbose):\n # process the order by taking orders off the LOB, consuming liquidity at the top of the book\n # this is where (MKT, IOC, FOK, AON) orders get matched and execute\n # returns messages re transactions, to be sent to traders involved; and a list of events to write to the tape\n # MKT order consumes the specified quantity, if available: partial fills allowed; ignores the price (so watch out for loss-making trades)\n # FOK only completes if it can consume the specified quantity at prices equal to or better than the specified price\n # IOC executes as much as it can of the 
specified quantity; allows partial fill: unfilled portion of order is cancelled\n # AON is like FOK but rests at the exchange until either (a) it can do complete fill or (b) clock reaches specified expiry time, at which point order cancelled.\n # NB the cancellations are not written to the tape, because they do not take liquidity away from the LOB\n\n\n def add_msg(msglist, tid, oid, etype, transactions, rev_order, fee, verbose):\n # add_msg(): add a message to list of messages from exchange back to traders\n # each msg tells trader [tid] that [OID] resulted in an event-type from [PART|FILL|FAIL]\n # if PART then also sends back [revised order] -- telling the trader what the LOB retains as the unfilled portion\n # if FILL then [revised order] is None\n # message concludes with bank-balance details: exchange fee & trader's balance at exchange\n msg = Exch_msg(tid, oid, etype, transactions, rev_order, fee, 0)\n msglist.append(msg)\n if verbose: print(msg)\n\n\n def add_tapeitem(eventlist, pool_id, eventtype, time, price, qty, party_from, party_to, verbose):\n # add_tapeitem(): add an event to list of events that will be written to tape\n # event type within book_take should be 'Trade'\n tape_event = { 'pool_id': pool_id,\n 'type': eventtype,\n 'time': time,\n 'price': price,\n 'qty': qty,\n 'party1': party_from,\n 'party2': party_to}\n eventlist.append(tape_event)\n if verbose: print('add_tapeitem() tape_event=%s' % tape_event)\n\n\n msg_list = [] # details of orders consumed from the LOB when filling this order\n trnsctns = [] # details of transactions resulting from this incoming order walking the book\n tape_events = [] # details of transaction events to be written onto tape\n qty_filled = 0 # how much of this order have we filled so far?\n fee = 0 # exchange fee charged for processing this order (taking liquidity, wrt maker-taker)\n\n if verbose: print('>book_take(): order=%s, lob=%s' % (order, self.lob))\n\n\n # initial checks, return FAIL if there is simply no 
hope of executing this order\n\n if len(self.lob) == 0:\n # no point going any further; LOB is empty\n add_msg(msg_list, order.tid, order.orderid, \"FAIL\", [], None, fee, verbose)\n return {\"TraderMsgs\": msg_list, \"TapeEvents\": tape_events}\n\n # how deep is the book? (i.e. what is cumulative qty available) at this order's indicated price level?\n depth = 0\n for level in self.lob_anon:\n if self.equaltoorbetterthan(level[0], order.price, verbose):\n depth += level[1]\n else: # we're past the level in the LOB where the prices are good for this order\n break\n\n if order.ostyle == \"FOK\" or order.ostyle == \"AON\":\n # FOK and AON require a complete fill\n # so we first check that this order can in principle be filled: is there enough liquidity available?\n if depth < order.qty:\n # there is not enough depth at prices that allow this order to completely fill\n add_msg(msg_list, order.tid, order.oid, \"FAIL\", [], None, fee, verbose)\n # NB here book_take() sends a msg back that an AON order is FAIL, that needs to be picked up by the\n # exchange logic and not passed back to the trader concerned, unless the AON has actually timed out\n return {\"TraderMsgs\": msg_list, \"TapeEvents\": tape_events}\n\n if order.ostyle == \"IOC\" and depth < 1 :\n # IOC order is a FAIL because there is no depth at all for the indicated price\n add_msg(msg_list, order.tid, order.orderid, \"FAIL\", [], None, fee, verbose)\n return {\"TraderMsgs\": msg_list, \"TapeEvents\": tape_events}\n\n\n # we only get this far if...\n # LOB is not empty\n # order is FOK or AON (complete fill only) -- we know there's enough depth to complete\n # order is MKT (allows partial fill, ignores prices, stops when indicated quantity is reached or LOB is empty)\n # order is IOC (allows partial fill, aims for indicated quantity but stops when price-limit is reached or LOB is empty) and LOB depth at price > 0\n\n if order.otype == \"Bid\":\n tid_to = order.tid\n oid_to = order.orderid\n elif order.otype == 
\"Ask\":\n tid_from = order.tid\n oid_from = order.orderid\n else: # this shouldn't happen\n sys.exit('>book_take: order.otype=%s in book_take' % order.otype)\n\n # make a copy of the order-list and lobs as it initially stands\n # used for reconciling fills and when order is abandoned because it can't complete (e.g. FOK, AON)\n # initial_orders = self.orders\n\n # work this order by \"walking the book\"\n\n qty_remaining = order.qty\n\n best_lob_price = self.lob[0][0]\n\n good_price = True\n\n if order.ostyle != \"MKT\":\n good_price = self.equaltoorbetterthan(best_lob_price, order.price, verbose)\n\n # this while loop consumes the top of the LOB while trying to fill the order\n while good_price and (qty_remaining > 0) and (len(self.orders)>0):\n\n good_price = self.equaltoorbetterthan(self.lob[0][0], order.price, verbose)\n\n if verbose:\n print('BK_TAKE: qty_rem=%d; lob=%s; good_price=%s' % (qty_remaining, str(self.lob), good_price))\n sys.stdout.flush()\n\n if order.ostyle == \"IOC\" and (not good_price):\n # current LOB best price is unacceptable for IOC\n if verbose: print(\n 'BK_TAKE: IOC breaks out of while loop (otype=%s best LOB price = %d; order price = %d)' %\n (order.otype, self.lob[0][0], order.price))\n break # out of the while loop\n\n best_lob_price = self.lob[0][0]\n best_lob_orders = self.lob[0][1]\n best_lob_order = best_lob_orders[0]\n best_lob_order_qty = best_lob_order[1]\n best_lob_order_tid = best_lob_order[2]\n best_lob_order_oid = best_lob_order[3]\n if order.otype == \"Bid\":\n tid_from = best_lob_order_tid\n oid_from = best_lob_order_oid\n elif order.otype == \"Ask\":\n tid_to = best_lob_order_tid\n oid_to = best_lob_order_oid\n\n if verbose: print('BK_TAKE: best_lob _price=%d _order=%s qty=%d oid_from=%d oid_to=%d tid_from=%s tid_to=%s\\n' %\n (best_lob_price, best_lob_order, best_lob_order_qty, oid_from, oid_to, tid_from, tid_to))\n\n\n # walk the book: does this order consume current best order on book?\n if best_lob_order_qty >= 
qty_remaining:\n\n # incoming liquidity-taking order is completely filled by consuming some/all of best order on LOB\n qty = qty_remaining\n price = best_lob_price\n qty_filled = qty_filled + qty\n best_lob_order_qty = best_lob_order_qty - qty\n # the incoming order is a complete fill\n transaction = {\"Price\":price, \"Qty\":qty}\n trnsctns.append(transaction)\n\n # add a message to the list of outgoing messages from exch to traders\n add_msg(msg_list, order.tid, order.orderid, \"FILL\", trnsctns, None, fee, verbose)\n\n # add a record of this to the tape (NB this identifies both parties to the trade, so only do it once)\n add_tapeitem(tape_events, pool_id, 'Trade', time, price, qty, tid_from, tid_to, verbose)\n\n # so far have dealt with effect of match on incoming order\n # now need to deal with effect of match on best order on LOB (the other side of the deal)\n if best_lob_order_qty > 0:\n # the best LOB order is only partially consumed\n best_lob_order[1] = best_lob_order_qty\n best_lob_orders[0] = best_lob_order\n self.lob[0][1] = best_lob_orders\n self.orders[best_lob_order_oid].qty = best_lob_order_qty\n # The LOB order it matched against is only a partial fill\n add_msg(msg_list, best_lob_order_tid, best_lob_order_oid, \"PART\", [transaction], self.orders[best_lob_order_oid], fee, verbose)\n # add_tapeitem(tape_events, 'Trade', time, price, qty, tid_from, tid_to, verbose)\n else:\n # the best LOB order is fully consumed: delete it from LOB\n del(best_lob_orders[0])\n del(self.orders[best_lob_order_oid])\n # The LOB order it matched against also complete\n add_msg(msg_list, best_lob_order_tid, best_lob_order_oid, \"FILL\", [transaction], None, fee, verbose)\n # add_tapeitem(tape_events, 'Trade', time, price, qty, tid_from, tid_to, verbose)\n # check: are there other remaining orders at this price?\n if len(best_lob_orders) > 0:\n # yes\n self.lob[0][1] = best_lob_orders\n else:\n # no\n del (self.lob[0]) # consumed the last order on the LOB at this price\n 
qty_remaining = 0 # liquidity-taking all done\n else:\n # order is only partially filled by current best order, but current best LOB order is fully filled\n # consume all the current best and repeat\n qty = best_lob_order_qty\n price = best_lob_price\n qty_filled = qty_filled + qty\n transaction = {\"Price\": price, \"Qty\": qty}\n trnsctns.append(transaction)\n\n # add a message to the list of outgoing messages from exch to traders\n add_msg(msg_list, best_lob_order_tid, best_lob_order_oid, \"FILL\", [transaction], None, fee, verbose)\n\n # add a record of this to the tape (NB this identifies both parties to the trade, so only do it once)\n add_tapeitem(tape_events, pool_id, 'Trade', time, price, qty, tid_from, tid_to, verbose)\n\n # the best LOB order is fully consumed: delete it from LOB and from order-list\n del(self.orders[best_lob_order_oid])\n del(best_lob_orders[0])\n\n # check: are there other remaining orders at this price?\n if len(best_lob_orders) > 0:\n # yes\n self.lob[0][1] = best_lob_orders\n else:\n # no\n del (self.lob[0]) # consumed the last order on the LOB at this price\n\n qty_remaining = qty_remaining - qty\n if verbose: print('New LOB=%s orders=%s' % (str(self.lob), str(self.orders)))\n\n # main while loop ends here\n\n # when we get to here either...\n # the order completely filled by consuming the front of the book (which may have emptied the whole book)\n # or the whole book was consumed (and is now empty) without completely filling the order\n # or IOC consumed as much of the book's availability at the order's indicated price (good_price = False)\n\n if qty_remaining > 0 :\n if qty_remaining == order.qty:\n # this order is wholly unfilled: that's a FAIL (how did this get past the initial checks?)\n add_msg(msg_list, order.tid, order.orderid, \"FAIL\", [], None, fee, verbose)\n else:\n # this liquidity-taking order only partially filled but ran out of usable LOB\n order.qty = qty_remaining #revise the order quantity\n add_msg(msg_list, 
order.tid, order.orderid, \"PART\", trnsctns, order, fee, verbose)\n # add_tapeitem(tape_events, 'Trade', time, price, qty, tid_from, tid_to, verbose)\n\n if verbose:\n print('<Orderbook_Half.book_take() TapeEvents=%s' % tape_events)\n print('<Orderbook_Half.book_take() TraderMsgs=')\n for msg in msg_list:\n print('%s,' % str(msg))\n print('\\n')\n\n # rebuild the lob to reflect the adjusted order list\n self.build_lob(verbose)\n\n return {\"TraderMsgs\":msg_list, \"TapeEvents\":tape_events}\n\n\n\n# Orderbook for a single instrument: list of bids and list of asks and methods to manipulate them\n\nclass Orderbook(Orderbook_half):\n\n\n def __init__(self, id_string):\n self.idstr = id_string # give it a name\n self.bids = Orderbook_half('Bid', bse_sys_minprice)\n self.asks = Orderbook_half('Ask', bse_sys_maxprice)\n self.ob_tape = [] # tape of just this orderbook's activities (may be consolidated at Exchange level)\n self.last_trans_t = None # time of last transaction\n self.last_trans_p = None # price of last transaction\n self.last_trans_q = None # quantity of last transaction\n\n\n def __str__(self):\n s = 'Orderbook:\\n'\n s = s + 'Bids: %s \\n' % str(self.bids)\n s = s + 'Asks: %s \\n' % str(self.asks)\n s = s + 'Tape[-5:]: %s \\n' % str(self.ob_tape[-5:])\n s = s + '\\n'\n return s\n\n\n def midprice(self, bid_p, bid_q, ask_p, ask_q):\n # returns midprice as mean of best bid and best ask if both best bid & best ask exist\n # if only one best price exists, returns that as mid\n # if neither best price exists, returns None\n mprice = None\n if bid_q > 0 and ask_q == None :\n mprice = bid_p\n elif ask_q > 0 and bid_q == None :\n mprice = ask_p\n elif bid_q>0 and ask_q >0 :\n mprice = ( bid_p + ask_p ) / 2.0\n return mprice\n\n\n def microprice(self, bid_p, bid_q, ask_p, ask_q):\n mprice = None\n if bid_q>0 and ask_q >0 :\n tot_q = bid_q + ask_q\n mprice = ( (bid_p * ask_q) + (ask_p * bid_q) ) / tot_q\n return mprice\n\n\n def add_lim_order(self, order, 
verbose):\n # add a LIM order to the LOB and update records\n if verbose: print('>add_lim_order: order.orderid=%d' % (order.orderid))\n if order.otype == 'Bid':\n response=self.bids.book_add(order, verbose)\n best_price = self.bids.lob_anon[0][0]\n self.bids.best_price = best_price\n else:\n response=self.asks.book_add(order, verbose)\n best_price = self.asks.lob_anon[0][0]\n self.asks.best_price = best_price\n return response\n\n\n def process_order_CAN(self, time, order, verbose):\n\n # cancel an existing order\n if verbose: print('>Orderbook.process_order_CAN order.orderid=%d' % order.orderid)\n\n if order.otype == 'Bid':\n # cancel order from the bid book\n response = self.bids.book_CAN(time, order, self.idstr, verbose)\n elif order.otype == 'Ask':\n # cancel order from the ask book\n response = self.asks.book_CAN(time, order, self.idstr, verbose)\n else:\n # we should never get here\n sys.exit('process_order_CAN() given neither Bid nor Ask')\n\n # response should be a message for the trader, and an event to write to the tape\n\n if verbose: print('PO_CAN %s' % response)\n\n return response\n\n\n def process_order_XXX(self, time, order, verbose):\n\n # cancel all orders on this orderbook that were issued by the trader that issued this order\n if verbose: print('>Orderbook.process_order_XXX order.orderid=%d' % order.orderid)\n\n tid = order.tid\n # need to sweep through all bids and and all asks and delete all orders from this trader\n\n responselist = []\n\n for bid_order in self.bids.orders:\n if bid_order.tid == tid :\n responselist.append(self.bids.book_CAN(time, order, verbose))\n\n for ask_order in self.asks.orders:\n if ask_order.tid == tid:\n responselist.append(self.asks.book_CAN(time, order, verbose))\n\n # responselist is handed back to caller level for them to unpack\n\n if verbose: print('PO_CAN %s' % responselist)\n\n return responselist\n\n\n def process_order_take(self, time, order, verbose):\n\n if verbose: print('> Orderbook.process_order_take 
order.orderid=%d' % order.orderid)\n\n if order.otype == 'Bid':\n # this bid consumes from the top of the ask book\n response = self.asks.book_take(time, order, self.idstr, verbose)\n elif order.otype == 'Ask':\n # this ask consumes from the top of the bid book\n response = self.bids.book_take(time, order, self.idstr, verbose)\n else: # we should never get here\n sys.exit('process_order_take() given neither Bid nor Ask')\n\n if verbose: print('OB.PO_take %s' % response)\n\n return response\n\n\n def process_order_LIM(self, time, order, verbose):\n\n # adds LIM and GFD orders -- GFD is just a time-limited LIM\n\n def process_LIM(order, verbose):\n response = self.add_lim_order(order, verbose)\n\n if verbose:\n print('>process_order_LIM order.orderid=%d' % order.orderid)\n print('Response: %s' % response)\n\n return response\n\n oprice = order.price\n\n # does the LIM price cross the spread?\n\n if order.otype == 'Bid':\n if len(self.asks.lob) > 0 and oprice >= self.asks.lob[0][0]:\n # crosses: this LIM bid lifts the best ask, so treat as IOC\n if verbose: print(\"Bid LIM $%s lifts best ask ($%s) =>IOC\" % (oprice, self.asks.lob[0][0]))\n order.ostyle = 'IOC'\n response = self.process_order_take(time, order, verbose)\n else:\n response = process_LIM(order, verbose)\n\n elif order.otype == 'Ask':\n if len(self.bids.lob) > 0 and oprice <= self.bids.lob[0][0]:\n # crosses: this LIM ask hits the best bid, so treat as IOC\n if verbose: print(\"Ask LIM $%s hits best bid ($%s) =>IOC\" % (oprice, self.bids.lob[0][0]))\n order.ostyle = 'IOC'\n response = self.process_order_take(time, order, verbose)\n else:\n response = process_LIM(order, verbose)\n else:\n # we should never get here\n sys.exit('process_order_LIM() given neither Bid nor Ask')\n\n return response\n\n\n def process_order_pending(self, time, order, verbose):\n # this returns a null response because it just places the order on the relevant pending-execution list\n # order styles LOO and MOO are subsequently 
processed/executed in the market_open() method\n # order styles LOC and MOC are subsequently processed/executed in the market_close() method\n\n if order.ostyle == 'LOO' or order.ostyle == 'MOO':\n if order.otype == 'Bid':\n self.bids.on_open.append(order)\n elif order.otype == 'Ask':\n self.asks.on_open.append(order)\n else:\n # we should never get here\n sys.exit('process_order_pending() LOO/MOO given neither Bid nor Ask')\n\n elif order.ostyle == 'LOC' or order.ostyle == 'MOC':\n if order.otype == 'Bid':\n self.bids.on_close.append(order)\n elif order.otype == 'Ask':\n self.asks.on_close.append(order)\n else:\n # we should never get here\n sys.exit('process_order_pending() LOC/MOC given neither Bid nor Ask')\n\n else: sys.exit('process_order_pending() given something other than LOO MOO LOC MOC')\n\n return {'TraderMsgs':None, 'TapeEvents':None}\n\n\n\n# Exchange's internal orderbooks\n\nclass Exchange(Orderbook):\n\n\n def __init__(self, eid):\n self.eid = eid # exchange ID string\n self.lit = Orderbook(eid + \"Lit\") # traditional lit exchange\n self.drk = Orderbook(eid + \"Drk\") # NB just a placeholder -- in this version of BSE the dark pool is undefined\n self.tape = [] # tape: consolidated record of trading events on the exchange\n self.trader_recs = {} # trader records (balances from fees, reputations, etc), indexed by traderID\n self.order_id = 0 # unique ID code for each order received by the exchange, starts at zero\n self.open = False # is the exchange open (for business) or closed?\n\n\n def __str__(self):\n s = '\\nExchID: %s ' % (self.eid)\n if self.open: s = s + '(Open)\\n'\n else: s = s + '(Closed)\\n'\n s = s + 'Lit ' + str(self.lit)\n s = s + 'Dark ' + str(self.drk)\n s = s + 'OID: %d; ' % self.order_id\n s = s + 'TraderRecs: %s' % self.trader_recs\n s = s + 'Tape[-4:]: %s' % self.tape[-4:]\n s = s + '\\n'\n return s\n\n\n class trader_record:\n # exchange's records for an individual trader\n\n def __init__(self, time, tid):\n self.tid = tid # 
this trader's ID\n self.regtime = time # time when first registered\n self.balance = 0 # balance at the exchange (from exchange fees and rebates)\n self.reputation = None # reputation -- FOR GEORGE CHURCH todo -- integrate with George's work\n self.orders = [] # list of orders received from this trader\n self.msgs = [] # list of messages sent to this trader\n\n\n def __str__(self):\n s = '[%s bal=%d rep=%s orders=%s msgs=%s]' % (self.tid, self.balance, self.reputation, self.orders, self.msgs)\n return s\n\n\n def consolidate_responses(self, responses):\n\n consolidated = {'TraderMsgs':[], 'TapeEvents':[]}\n\n if len(responses) > 1:\n # only need to do this if been given more than one response\n for resp in responses:\n consolidated['TraderMsgs'].append(resp['TraderMsgs'])\n consolidated['TapeEvents'].append(resp['TapeEvents'])\n # could sort into time-order here, but its not essential -- todo\n else:\n consolidated = responses[0]\n\n return consolidated\n\n\n def mkt_open(self, time, verbose):\n\n # exchange opens for business\n # need to process any LOO and MOO orders:\n # processes LOO and MOO orders in sequence wrt where they are in the relevant on_open list\n\n def open_pool(time, pool, verbose):\n\n responses = []\n\n # LOO and MOO\n for order in pool.on_open:\n if order.ostyle == 'LIM':\n responses.append(pool.process_order_LIM(time, order, verbose))\n elif order.ostyle == 'MKT':\n responses.append(pool.process_order_take(time, order, verbose))\n else: sys.exit('FAIL in open_pool(): neither LIM nor MKT in on_open list ')\n\n return responses\n\n\n print('Exchange %s opening for business', self.eid)\n response_l = open_pool(self.lit)\n response_d = open_pool(self.drk)\n\n self.open = True\n return consolidate_responses([response_l, response_d])\n\n\n def mkt_close(self):\n\n # exchange closes for business\n # need to process any LOC, MOC, and GFD orders\n # NB GFD orders assumes that exchange closing is the same as end of day\n\n def close_pool(time, pool, 
verbose):\n\n responses = []\n\n # LOC and MOC\n for order in pool.on_close:\n if order.ostyle == 'LIM':\n responses.append(pool.process_order_LIM(time, order, verbose))\n elif order.ostyle == 'MKT':\n responses.append(pool.process_order_take(time, order, verbose))\n else: sys.exit('FAIL in open_pool(): neither LIM nor MKT in on_close list ')\n # GFD -- cancel any orders still on the books\n for order in pool.orders:\n if order.ostyle == 'GFD':\n responses.append(pool.process_order_CAN(time, order, verbose))\n\n return responses\n\n print('Exchange %s closing for business', self.eid)\n response_l = close_pool(self.lit)\n response_d = close_pool(self.drk)\n\n self.open = False\n return consolidate_responses([response_l, response_d])\n\n\n def tape_update(self, tr, verbose):\n\n # updates the tape\n if verbose: print(\"Tape update: tr=%s; len(tape)=%d tape[-3:]=%s\" % (tr, len(self.tape), self.tape[-3:]))\n\n self.tape.append(tr)\n\n if tr['type'] == 'Trade':\n # process the trade\n if verbose: print('>>>>>>>>TRADE t=%5.3f $%d Q%d %s %s\\n' %\n (tr['time'], tr['price'], tr['qty'], tr['party1'], tr['party2']))\n self.last_trans_t = tr['time'] # time of last transaction\n self.last_trans_p = tr['price'] # price of last transaction\n self.last_trans_q = tr['qty'] # quantity of last transaction\n return tr\n\n\n def dump_tape(self, session_id, dumpfile, tmode,traders):\n\n # print('Dumping tape s.tape=')\n # for ti in self.tape:\n # print('%s' % ti)\n\n for tapeitem in self.tape:\n # print('tape_dump: tapitem=%s' % tapeitem)\n if tapeitem['type'] == 'Trade':\n dumpfile.write('%s, %s, %s,%s,%s,%s,%s, %s\\n' % (session_id, tapeitem['pool_id'], tapeitem['time'], tapeitem['price'],tapeitem['qty'],traders[tapeitem['party2']].ttype, traders[tapeitem['party1']].ttype,str(tapeitem)))\n\n if tmode == 'wipe':\n self.tape = []\n\n aaFile = open('myFile_AA.csv','a');\n\n for tapeitem in self.tape:\n # print('tape_dump: tapitem=%s' % tapeitem)\n if tapeitem['type'] == 'Trade':\n 
if(traders[tapeitem['party2']].ttype == 'SHVR' and traders[tapeitem['party1']].ttype == 'AA'):\n aaFile.write('%s\\n' % (tapeitem['price']))\n\n aaFile.close()\n\n iaaFile = open('myFile_IAA.csv','a')\n\n for tapeitem in self.tape:\n # print('tape_dump: tapitem=%s' % tapeitem)\n if tapeitem['type'] == 'Trade':\n if (traders[tapeitem['party2']].ttype == 'SHVR' and traders[tapeitem['party1']].ttype == 'IAA'):\n iaaFile.write('%s\\n' % (tapeitem['price']))\n\n iaaFile.close()\n\n\n\n\n\n def process_order(self, time, order, verbose):\n # process the order passed in as a parameter\n # number of allowable order-types is significantly expanded in BSE2 (previously just had LIM/MKT functionality)\n # BSE2 added order types such as FOK, ICE, etc\n # also added stub logic for larger orders to be routed to dark pool\n # currently treats dark pool as another instance of Orderbook, same as lit pool\n # incoming order has order ID assigned by exchange\n # return is {'tape_summary':... ,'trader_msgs':...}, explained further below\n\n if verbose: print('>Exchange.process_order()\\n')\n\n trader_id = order.tid\n\n if not trader_id in self.trader_recs:\n # we've not seen this trader before, so create a record for it\n if verbose: print('t=%f: Exchange %s registering Trader %s:' % (time, self.eid, trader_id))\n trader_rec = self.trader_record(time, trader_id)\n self.trader_recs[trader_id] = trader_rec\n if verbose: print('record= %s' % str(trader_rec))\n\n # what quantity qualifies as a block trade (route to DARK)?\n block_size = 300\n\n ostyle = order.ostyle\n\n ack_response = Exch_msg(trader_id, order.orderid, 'ACK', [[order.price, order.qty]], None, 0, 0)\n if verbose: print ack_response\n\n\n # which pool does it get sent to: Lit or Dark?\n if order.qty < block_size:\n if verbose: print('Process_order: qty=%d routes to LIT pool' % order.qty)\n pool = self.lit\n else:\n if verbose: print('Process_order: qty=%d routes to DARK pool' % order.qty)\n pool = self.drk\n\n\n # 
Cancellations don't generate new order-ids\n\n if ostyle == 'CAN':\n # deleting a single existing order\n # NB this trusts the order.qty -- sends CANcel only to the pool that the QTY indicates\n response = pool.process_order_CAN(time, order, verbose)\n\n elif ostyle == 'XXX':\n # delete all orders from the trader that issued the XXX order\n # need to sweep through both pools\n response_l = self.lit.process_order_XXX(time, order, verbose)\n response_d = self.drk.process_order_XXX(time, order, verbose)\n # the response from either lit and/or dark might be a string of responses from multiple individual CAN orders\n # here we just glue those together for later processing\n self.consolidate_responses([response_l, response_d])\n\n else:\n # give each new order a unique ID\n order.orderid = self.order_id\n self.order_id = order.orderid + 1\n\n ack_msg = Exch_msg(trader_id, order.orderid, 'ACK', [[order.price, order.qty]], None, 0, 0)\n\n if verbose: print('OrderID:%d, ack:%s\\n' % (order.orderid, ack_msg))\n\n if ostyle == 'LIM' or ostyle == 'GFD':\n # GFD is just a LIM order with an expiry time\n response = pool.process_order_LIM(time, order, verbose)\n\n elif ostyle == 'MKT' or ostyle == 'AON' or ostyle == 'FOK' or ostyle == 'IOC':\n if ostyle == 'AON': pool.resting.append(order) # put it on the list of resting orders\n response = pool.process_order_take(time, order, verbose)\n # AON is a special case: if current response is that it FAILed, but has not timed out\n # then ignore the failure\n # and if it didn't fail, check to remove it from the MOB\n if ostyle == 'AON':\n if response['TraderMsgs'].event == 'FAIL':\n # it failed, but has it timed out yet?\n if time < order.styleparams['ExpiryTime']:\n # it hasn't expired yet\n # nothing to say back to the trader, nothing to write to tape\n response['TraderMsgs'] = None\n response['TapeEvents'] = None\n else: # AON order executed successfully, remove it from the MOB\n pool.resting.remove(order)\n\n elif ostyle == 'LOC' or 
ostyle == 'MOC' or ostyle == 'LOO' or ostyle == 'MOO':\n # these are just placed on the relevant wait-list at the exchange\n # and then processed by mkt_open() or mkt_close()\n response = pool.process_order_pending(time, order, verbose)\n\n elif ostyle == 'OCO' or ostyle == 'OSO':\n # processing of OSO and OCO orders is a recursive call of this method\n # that is, call process_order() on the first order in the OXO pair\n # then call or ignore the second order depending on outcome of the first\n # OCO and OSO are both defined via the following syntax...\n # ostyle=OSO or OCO; styleparams=[[order1], [order2]]\n # currently only defined for [order1] and [order2] both LIM type\n\n if len(order.styleparams) == 2:\n order1 = order.styleparams[0]\n order2 = order.styleparams[1]\n if order1.ostyle == 'LIM' and order2.ostyle == 'LIM':\n sys.exit('Give up')\n\n response = pool.process_order_OXO(time, order, verbose)\n\n elif ostyle == 'ICE':\n # this boils down to a chain of successively refreshed OSO orders, until its all used up\n # so underneath it's LIM functionality only\n response = pool.process_order_ICE(time, order, verbose)\n\n else:\n sys.exit('FAIL: process_order given order style %s', ostyle)\n\n\n\n\n if verbose: print ('<Exch.Proc.Order(): Order=%s; Response=%s' % (order, response))\n\n # default return values\n trader_msgs = None\n tape_events = None\n\n if response != None:\n # non-null response should be dictionary with two items: list of trader messages and list of tape events\n if verbose: print('Response ---- ')\n trader_msgs = response[\"TraderMsgs\"]\n tape_events = response[\"TapeEvents\"]\n\n total_fees = 0\n # trader messages include details of fees charged by exchange for processing this order\n for msg in trader_msgs:\n if msg.tid == trader_id:\n total_fees += msg.fee\n if verbose: print('Trader %s adding fee %d from msg %s' % (trader_id, msg.fee, msg))\n self.trader_recs[trader_id].balance += total_fees\n if verbose: print('Trader %s Exch %s: 
updated balance=%d' % (trader_id, self.eid, self.trader_recs[trader_id].balance))\n\n # record the tape events on the tape\n if len(tape_events) > 0:\n for event in tape_events:\n self.tape_update(event, verbose)\n\n if verbose:\n print('<Exch.Proc.Order(): tape_events=%s' % tape_events)\n s = '<Exch.Proc.Order(): trader_msgs=['\n for msg in trader_msgs:\n s = s + '[' + str(msg) + '], '\n s = s + ']'\n print(s)\n\n # by this point, tape has been updated\n # so in principle only thing process_order hands back to calling level is messages for traders\n\n # but...\n\n # for back-compatibility with this method in BSE1.x and with trader definitions (AA, ZIP, etc)\n # we ALSO hand back a \"transaction record\" which summarises any actual transactions\n # or is None if no transactions occurred. Structure was:\n # transaction_record = {'type': 'Trade',\n # 'time': time,\n # 'price': price,\n # 'party1': counterparty,\n # 'party2': order.tid,\n # 'qty': order.qty\n # }\n # In BSE 1.x the maximum order-size was Qty=1, which kept things very simple\n # In BSE 2.x, a single order of Qty>1 can result in multiple separate transactions,\n # so we need to aggregate those into one order. 
Do this by computing total cost C of\n # execution for quantity Q and then declaring that the price for each unit was C/Q\n # As there may now be more then one counterparty to a single order, party1 & party2 returned as None\n\n tape_summary = None\n if len(tape_events) > 0:\n total_cost = 0\n total_qty = 0\n if verbose: print('tape_summary:')\n for event in tape_events:\n if event['type'] == 'Trade':\n total_cost += event['price']\n total_qty += event['qty']\n if verbose: print('total_cost=%d; total_qty=%d' % (total_cost, total_qty))\n if total_qty > 0 :\n avg_cost = total_cost / total_qty\n if verbose: print('avg_cost=%d' % avg_cost)\n tape_summary = {'type': 'Trade',\n 'time': time,\n 'price': avg_cost,\n 'party1': None,\n 'party2': None,\n 'qty': total_qty}\n\n return {'tape_summary':tape_summary, 'trader_msgs':trader_msgs}\n else: return {'tape_summary':None, 'trader_msgs':None}\n\n\n # this returns the LOB data \"published\" by the exchange,\n # only applies to the lit book -- dark pools aren't published\n def publish_lob(self, time, tape_depth, verbose):\n\n n_bids = len(self.lit.bids.orders)\n if n_bids > 0 :\n best_bid_p = self.lit.bids.lob_anon[0][0]\n else: best_bid_p = None\n\n n_asks = len(self.lit.asks.orders)\n if n_asks > 0:\n best_ask_p = self.lit.asks.lob_anon[0][0]\n else:\n best_ask_p = None\n\n public_data = {}\n public_data['time'] = time\n public_data['bids'] = {'bestp':best_bid_p,\n 'worstp':self.lit.bids.worst_price,\n 'n': n_bids,\n 'lob':self.lit.bids.lob_anon}\n public_data['asks'] = {'bestp':best_ask_p,\n 'worstp':self.lit.asks.worst_price,\n 'n': n_asks,\n 'lob':self.lit.asks.lob_anon}\n\n public_data['last_t'] = self.lit.last_trans_t\n public_data['last_p'] = self.lit.last_trans_p\n public_data['last_q'] = self.lit.last_trans_q\n\n\n\n\n if tape_depth == None :\n public_data['tape'] = self.tape # the full thing\n else:\n public_data['tape'] = self.tape[-tape_depth:] # depth-limited\n\n public_data['midprice'] = None\n 
public_data['microprice'] = None\n if n_bids>0 and n_asks>0 :\n # neither side of the LOB is empty\n best_bid_q= self.lit.bids.lob_anon[0][1]\n best_ask_q = self.lit.asks.lob_anon[0][1]\n public_data['midprice'] = self.lit.midprice(best_bid_p, best_bid_q, best_ask_p, best_ask_q)\n public_data['microprice'] = self.lit.microprice(best_bid_p, best_bid_q, best_ask_p, best_ask_q)\n\n if verbose:\n print('Exchange.publish_lob: t=%s' % time)\n print('BID_lob=%s' % public_data['bids']['lob'])\n print('best=%s; worst=%s; n=%s ' % (best_bid_p, self.lit.bids.worst_price, n_bids))\n print(str(self.lit.bids))\n print('ASK_lob=%s' % public_data['asks']['lob'])\n print('best=%s; worst=%s; n=%s ' % (best_ask_p, self.lit.asks.worst_price, n_asks))\n print(str(self.lit.asks))\n print('Midprice=%s; Microprice=%s' % (public_data['midprice'], public_data['microprice']))\n print('Last transaction: time=%s; price=%s; qty=%s' % (public_data['last_t'],public_data['last_p'],public_data['last_q']))\n print('tape[-3:]=%s'% public_data['tape'][-3:])\n sys.stdout.flush()\n\n\n return public_data\n\n\n\n\n\n\n\n##########################---Below lies the experiment/test-rig---##################\n\n\n\n# trade_stats()\n# dump CSV statistics on exchange data and trader population to file for later analysis\n# this makes no assumptions about the number of types of traders, or\n# the number of traders of any one type -- allows either/both to change\n# between successive calls, but that does make it inefficient as it has to\n# re-analyse the entire set of traders on each call\ndef trade_stats(expid, traders, dumpfile, time, lob):\n trader_types = {}\n n_traders = len(traders)\n for t in traders:\n ttype = traders[t].ttype\n if ttype in trader_types.keys():\n t_balance = trader_types[ttype]['balance_sum'] + traders[t].balance\n n = trader_types[ttype]['n'] + 1\n else:\n t_balance = traders[t].balance\n n = 1\n trader_types[ttype] = {'n':n, 'balance_sum':t_balance}\n\n\n dumpfile.write('%s, %06d, ' % 
(expid, time))\n for ttype in sorted(list(trader_types.keys())):\n n = trader_types[ttype]['n']\n s = trader_types[ttype]['balance_sum']\n dumpfile.write('%s, %d, %d, %f, ' % (ttype, s, n, s / float(n)))\n\n if lob['bids']['bestp'] != None :\n dumpfile.write('%d, ' % (lob['bids']['bestp']))\n else:\n dumpfile.write('N, ')\n if lob['asks']['bestp'] != None :\n dumpfile.write('%d, ' % (lob['asks']['bestp']))\n else:\n dumpfile.write('N, ')\n dumpfile.write('\\n');\n\n\n\n# create a bunch of traders from traders_spec\n# returns tuple (n_buyers, n_sellers)\n# optionally shuffles the pack of buyers and the pack of sellers\ndef populate_market(traders_spec, traders, shuffle, verbose):\n\n\n def trader_type(robottype, name):\n if robottype == 'GVWY':\n return Trader_Giveaway('GVWY', name, 0.00, 0)\n elif robottype == 'ZIC':\n return Trader_ZIC('ZIC', name, 0.00, 0)\n elif robottype == 'SHVR':\n return Trader_Shaver('SHVR', name, 0.00, 0)\n elif robottype == 'ISHV':\n return Trader_ISHV('ISHV', name, 0.00, 0)\n elif robottype == 'SNPR':\n return Trader_Sniper('SNPR', name, 0.00, 0)\n elif robottype == 'ZIP':\n return Trader_ZIP('ZIP', name, 0.00, 0)\n elif robottype == 'AA':\n return Trader_AA('AA', name, 0.00, 0)\n\n elif robottype == 'AAA':\n return Trader_AA('AAA', name, 0.00, 0)\n\n\n elif robottype == 'SIMPLE':\n return Trader_Simple_MLOFI('SIMPLE',name,0.00,0)\n elif robottype == 'IAA_MLOFI_ASK':\n return Trader_IAA_MLOFI('MLOFI_ASK',name,0.00,0)\n elif robottype == 'IAA_MLOFI_BID':\n return Trader_IAA_MLOFI('MLOFI_BID',name,0.00,0)\n elif robottype == 'AAAA':\n return Trader_AA('AAAA', name, 0.00, 0)\n\n elif robottype == 'ISHV_ASK':\n return Trader_ISHV('ISHV_ASK', name, 0.00, 0)\n elif robottype == 'GDX':\n return Trader_GDX('GDX', name, 0.00, 0)\n elif robottype == 'GDXB':\n return Trader_GDX('GDXB', name, 0.00, 0)\n elif robottype == 'ZIPP':\n return Trader_ZIP('ZIPP', name, 0.00, 0)\n\n elif robottype == 'IAA_MLOFI':\n return 
Trader_IAA_MLOFI('MLOFI',name,0.00,0,3)\n elif robottype == 'IAA_MLOFI1':\n return Trader_IAA_MLOFI('MLOFI1',name,0.00,0, 1)\n\n elif robottype == 'IAA_MLOFI2':\n return Trader_IAA_MLOFI('MLOFI2', name, 0.00,0, 2)\n elif robottype == 'IAA_MLOFI3':\n return Trader_IAA_MLOFI('MLOFI3', name, 0.00,0, 3)\n elif robottype == 'IAA_MLOFI4':\n return Trader_IAA_MLOFI('MLOFI4', name, 0.00, 0,4)\n elif robottype == 'IAA_MLOFI5':\n return Trader_IAA_MLOFI('MLOFI5', name, 0.00,0, 5)\n elif robottype == 'IAA_MLOFI6':\n return Trader_IAA_MLOFI('MLOFI6', name, 0.00,0, 6)\n elif robottype == 'IAA_MLOFI7':\n return Trader_IAA_MLOFI('MLOFI7', name, 0.00,0, 7)\n elif robottype == 'IAA_MLOFI8':\n return Trader_IAA_MLOFI('MLOFI8', name, 0.00,0, 8)\n elif robottype == 'IAA_MLOFI9':\n return Trader_IAA_MLOFI('MLOFI9', name, 0.00,0, 9)\n elif robottype == 'IAA_MLOFI10':\n return Trader_IAA_MLOFI('MLOFI10', name, 0.00, 0, 10)\n elif robottype == 'IAA_MLOFI11':\n return Trader_IAA_MLOFI('MLOFI11', name, 0.00, 0, 11)\n elif robottype == 'IAA_MLOFI12':\n return Trader_IAA_MLOFI('MLOFI12', name, 0.00, 0, 12)\n elif robottype == 'IAA_MLOFI13':\n return Trader_IAA_MLOFI('MLOFI13', name, 0.00, 0, 13)\n elif robottype == 'IAA_MLOFI14':\n return Trader_IAA_MLOFI('MLOFI14', name, 0.00, 0, 14)\n elif robottype == 'IAA_MLOFI15':\n return Trader_IAA_MLOFI('MLOFI15', name, 0.00, 0, 15)\n elif robottype == 'IAA_MLOFI16':\n return Trader_IAA_MLOFI('MLOFI16', name, 0.00, 0, 16)\n elif robottype == 'IAA_MLOFI17':\n return Trader_IAA_MLOFI('MLOFI17', name, 0.00, 0, 17)\n elif robottype == 'IAA_MLOFI18':\n return Trader_IAA_MLOFI('MLOFI18', name, 0.00, 0, 18)\n elif robottype == 'IAA_MLOFI19':\n return Trader_IAA_MLOFI('MLOFI19', name, 0.00, 0, 19)\n elif robottype == 'IAA_MLOFI20':\n return Trader_IAA_MLOFI('MLOFI20', name, 0.00, 0, 20)\n elif robottype == 'IAA_MLOFI30':\n return Trader_IAA_MLOFI('MLOFI30', name, 0.00, 0, 30)\n elif robottype == 'IAA_MLOFI50':\n return Trader_IAA_MLOFI('MLOFI50', name, 0.00, 
0, 50)\n elif robottype == 'IZIP_3':\n return Trader_IZIP_MLOFI('IZIP_3',name,0.00,0,3)\n\n elif robottype == 'IGDX_3':\n return Trader_IGDX_MLOFI('IGDX_3', name, 0.00, 0, 3)\n elif robottype == 'IZIPB_3':\n return Trader_IZIP_MLOFI('IZIPB_3',name,0.00,0,3)\n\n elif robottype == 'IGDXB_3':\n return Trader_IGDX_MLOFI('IGDXB_3', name, 0.00, 0, 3)\n elif robottype == 'IAAB_3':\n return Trader_IAA_MLOFI('IAAB3', name, 0.00,0, 3)\n\n\n elif robottype == 'ASK_IGDX_3':\n return Trader_IGDX_MLOFI('ASK_IGDX_3', name, 0.00, 0, 3)\n elif robottype == 'BID_IGDX_3':\n return Trader_IGDX_MLOFI('BID_IGDX_3', name, 0.00, 0, 3)\n elif robottype == 'ASK_IZIP_3':\n return Trader_IZIP_MLOFI('ASK_IZIP_3',name,0.00,0,3)\n elif robottype == 'BID_IZIP_3':\n return Trader_IZIP_MLOFI('BID_IZIP_3',name,0.00,0,3)\n elif robottype == 'ASK_IAA_3':\n return Trader_IAA_MLOFI('ASK_IAA_3', name, 0.00,0, 3)\n elif robottype == 'BID_IAA_3':\n return Trader_IAA_MLOFI('BID_IAA_3', name, 0.00, 0, 3)\n\n elif robottype == 'ASK_AA':\n return Trader_AA('ASK_AA', name, 0.00, 0)\n elif robottype == 'BID_AA':\n return Trader_AA('BID_AA', name, 0.00, 0)\n elif robottype == 'ASK_GDX':\n return Trader_GDX('ASK_GDX', name, 0.00, 0)\n elif robottype == 'BID_GDX':\n return Trader_GDX('BID_GDX', name, 0.00, 0)\n elif robottype == 'ASK_ZIP':\n return Trader_ZIP('ASK_ZIP', name, 0.00, 0)\n elif robottype == 'BID_ZIP':\n return Trader_ZIP('BID_ZIP', name, 0.00, 0)\n\n\n elif robottype == 'IAA':\n return Trader_IAA_MLOFI('IAA', name, 0.00,0, 3)\n elif robottype == 'IAA_3':\n return Trader_IAA_MLOFI('IAA_3', name, 0.00,0, 3)\n\n\n elif robottype == 'ASK_SHVR':\n return Trader_Shaver('ASK_SHVR', name, 0.00, 0)\n elif robottype == 'BID_SHVR':\n return Trader_Shaver('BID_SHVR', name, 0.00, 0)\n elif robottype == 'ASK_ISHV_3':\n return Trader_ISHV('ASK_ISHV_3', name, 0.00, 0)\n elif robottype == 'BID_ISHV_3':\n return Trader_ISHV('BID_ISHV_3', name, 0.00, 0)\n\n\n elif robottype == 'GDXXX':\n return Trader_GDX('GDXXX', name, 
0.00, 0)\n elif robottype == 'GDXX':\n return Trader_GDX('GDXX', name, 0.00, 0)\n\n\n\n elif robottype == 'IAA_NEW':\n return Trader_IAA_NEW('IAA_NEW', name, 0.00, 0, 3)\n\n\n elif robottype == 'ZZISHV':\n return Trader_ZZISHV('ZZISHV', name, 0.00, 0,3)\n elif robottype == 'ASK_ZZISHV':\n return Trader_ZZISHV('ASK_ZZISHV', name, 0.00, 0,3)\n elif robottype == 'BID_ZZISHV':\n return Trader_ZZISHV('BID_ZZISHV', name, 0.00, 0,3)\n elif robottype == 'ASK_ISHV':\n return Trader_ISHV('ASK_ISHV', name, 0.00, 0)\n elif robottype == 'BID_ISHV':\n return Trader_ISHV('BID_ISHV', name, 0.00, 0)\n\n elif robottype == 'ZIPPP':\n return Trader_ZIP('ZIPPP', name, 0.00, 0)\n elif robottype == 'ZIPP':\n return Trader_ZIP('ZIPP', name, 0.00, 0)\n elif robottype == 'IZIP':\n return Trader_IZIP_MLOFI('IZIP',name,0.00,0,3)\n elif robottype == 'IGDX':\n return Trader_IGDX_MLOFI('IGDX', name, 0.00, 0, 3)\n\n\n else:\n sys.exit('FATAL: don\\'t know robot type %s\\n' % robottype)\n\n\n\n def shuffle_traders(ttype_char, n, traders):\n for swap in range(n):\n t1 = (n - 1) - swap\n t2 = random.randint(0, t1)\n t1name = '%c%02d' % (ttype_char, t1)\n t2name = '%c%02d' % (ttype_char, t2)\n traders[t1name].tid = t2name\n traders[t2name].tid = t1name\n temp = traders[t1name]\n traders[t1name] = traders[t2name]\n traders[t2name] = temp\n\n\n n_buyers = 0\n for bs in traders_spec['buyers']:\n ttype = bs[0]\n for b in range(bs[1]):\n tname = 'B%02d' % n_buyers # buyer i.d. string\n traders[tname] = trader_type(ttype, tname)\n n_buyers = n_buyers + 1\n\n if n_buyers < 1:\n sys.exit('FATAL: no buyers specified\\n')\n\n if shuffle: shuffle_traders('B', n_buyers, traders)\n\n\n n_sellers = 0\n for ss in traders_spec['sellers']:\n ttype = ss[0]\n for s in range(ss[1]):\n tname = 'S%02d' % n_sellers # buyer i.d. 
string\n traders[tname] = trader_type(ttype, tname)\n n_sellers = n_sellers + 1\n\n if n_sellers < 1:\n sys.exit('FATAL: no sellers specified\\n')\n\n if shuffle: shuffle_traders('S', n_sellers, traders)\n\n if verbose:\n for t in range(n_buyers):\n bname = 'B%02d' % t\n print(traders[bname])\n for t in range(n_sellers):\n bname = 'S%02d' % t\n print(traders[bname])\n\n\n return {'n_buyers':n_buyers, 'n_sellers':n_sellers}\n\n\n\n# customer_orders(): allocate orders to traders\n# this version only issues LIM orders; LIM that crosses the spread executes as MKT\n# parameter \"os\" is order schedule\n# os['timemode'] is either 'periodic', 'drip-fixed', 'drip-jitter', or 'drip-poisson'\n# os['interval'] is number of seconds for a full cycle of replenishment\n# drip-poisson sequences will be normalised to ensure time of last replenishment <= interval\n# parameter \"pending\" is the list of future orders (if this is empty, generates a new one from os)\n# revised \"pending\" is the returned value\n#\n# also returns a list of \"cancellations\": trader-ids for those traders who are now working a new order and hence\n# need to kill quotes already on LOB from working previous order\n#\n#\n# if a supply or demand schedule mode is \"random\" and more than one range is supplied in ranges[],\n# then each time a price is generated one of the ranges is chosen equiprobably and\n# the price is then generated uniform-randomly from that range\n#\n# if len(range)==2, interpreted as min and max values on the schedule, specifying linear supply/demand curve\n# if len(range)==3, first two vals are min & max, third value should be a function that generates a dynamic price offset\n# -- the offset value applies equally to the min & max, so gradient of linear sup/dem curve doesn't vary\n# if len(range)==4, the third value is function that gives dynamic offset for schedule min,\n# and fourth is a function giving dynamic offset for schedule max, so gradient of sup/dem linear curve can vary\n#\n# 
the interface on this is a bit of a mess... could do with refactoring\n\n\ndef customer_orders(time, last_update, traders, trader_stats, os, pending, base_oid, verbose):\n\n\n def sysmin_check(price):\n if price < bse_sys_minprice:\n print('WARNING: price < bse_sys_min -- clipped')\n price = bse_sys_minprice\n return price\n\n\n def sysmax_check(price):\n if price > bse_sys_maxprice:\n print('WARNING: price > bse_sys_max -- clipped')\n\n price = bse_sys_maxprice\n return price\n \n\n def getorderprice(i, sched_end, sched, n, mode, issuetime):\n # does the first schedule range include optional dynamic offset function(s)?\n if len(sched[0]) > 2:\n offsetfn = sched[0][2][0]\n offsetfn_params = [sched_end] + [p for p in sched[0][2][1] ]\n if callable(offsetfn):\n # same offset for min and max\n offset_min = offsetfn(issuetime, offsetfn_params)\n offset_max = offset_min\n else:\n sys.exit('FAIL: 3rd argument of sched in getorderprice() should be [callable_fn [params]]')\n if len(sched[0]) > 3:\n # if second offset function is specfied, that applies only to the max value\n offsetfn = sched[0][3][0]\n offsetfn_params = [sched_end] + [p for p in sched[0][3][1] ]\n if callable(offsetfn):\n # this function applies to max\n offset_max = offsetfn(issuetime, offsetfn_params)\n else:\n sys.exit('FAIL: 4th argument of sched in getorderprice() should be [callable_fn [params]]')\n else:\n offset_min = 0.0\n offset_max = 0.0\n\n pmin = sysmin_check(offset_min + min(sched[0][0], sched[0][1]))\n pmax = sysmax_check(offset_max + max(sched[0][0], sched[0][1]))\n prange = pmax - pmin\n stepsize = prange / (n - 1)\n halfstep = round(stepsize / 2.0)\n\n if mode == 'fixed':\n orderprice = pmin + int(i * stepsize) \n elif mode == 'jittered':\n orderprice = pmin + int(i * stepsize) + random.randint(-halfstep, halfstep)\n elif mode == 'random':\n if len(sched) > 1:\n # more than one schedule: choose one equiprobably\n s = random.randint(0, len(sched) - 1)\n pmin = sysmin_check(min(sched[s][0], 
sched[s][1]))\n pmax = sysmax_check(max(sched[s][0], sched[s][1]))\n orderprice = random.randint(pmin, pmax)\n else:\n sys.exit('FAIL: Unknown mode in schedule')\n orderprice = sysmin_check(sysmax_check(orderprice))\n return orderprice\n\n\n def getissuetimes(n_traders, mode, interval, shuffle, fittointerval):\n # generates a set of issue times for the customer orders to arrive at\n interval = float(interval)\n if n_traders < 1:\n sys.exit('FAIL: n_traders < 1 in getissuetime()')\n elif n_traders == 1:\n tstep = interval\n else:\n tstep = interval / (n_traders - 1)\n arrtime = 0\n issuetimes = []\n for t in range(n_traders):\n if mode == 'periodic':\n arrtime = interval\n elif mode == 'drip-fixed':\n arrtime = t * tstep\n elif mode == 'drip-jitter':\n arrtime = t * tstep + tstep * random.random()\n elif mode == 'drip-poisson':\n # poisson requires a bit of extra work\n interarrivaltime = random.expovariate(n_traders / interval)\n arrtime += interarrivaltime\n else:\n sys.exit('FAIL: unknown time-mode in getissuetimes()')\n issuetimes.append(arrtime) \n \n # at this point, arrtime is the *last* arrival time\n if fittointerval and mode == 'drip-poisson' and (arrtime != interval) :\n # generated sum of interarrival times longer than the interval\n # squish them back so that last arrival falls at t=interval\n for t in range(n_traders):\n issuetimes[t] = interval * (issuetimes[t] / arrtime)\n\n # optionally randomly shuffle the times\n if shuffle:\n for t in range(n_traders):\n i = (n_traders - 1) - t\n j = random.randint(0, i)\n tmp = issuetimes[i]\n issuetimes[i] = issuetimes[j]\n issuetimes[j] = tmp\n return issuetimes\n \n\n def getschedmode(time, os):\n # os is order schedules\n got_one = False\n for sched in os:\n if (sched['from'] <= time) and (time < sched['to']) :\n # within the timezone for this schedule\n schedrange = sched['ranges']\n mode = sched['stepmode']\n sched_end_time = sched['to']\n got_one = True\n exit # jump out the loop -- so the first matching 
timezone has priority over any others\n if not got_one:\n sys.exit('Fail: time=%5.2f not within any timezone in os=%s' % (time, os))\n return (schedrange, mode, sched_end_time)\n \n\n n_buyers = trader_stats['n_buyers']\n n_sellers = trader_stats['n_sellers']\n\n shuffle_times = True\n\n cancellations = []\n\n oid = base_oid\n\n max_qty = 1\n\n if len(pending) < 1:\n # list of pending (to-be-issued) customer orders is empty, so generate a new one\n new_pending = []\n\n # demand side (buyers)\n issuetimes = getissuetimes(n_buyers, os['timemode'], os['interval'], shuffle_times, True)\n ordertype = 'Bid'\n orderstyle = 'LIM'\n (sched, mode, sched_end) = getschedmode(time, os['dem'])\n for t in range(n_buyers):\n issuetime = time + issuetimes[t]\n tname = 'B%02d' % t\n ## flag\n orderprice = getorderprice(t, sched_end, sched, n_buyers, mode, issuetime)\n # if time<101 or (time>201 and time <301) or (time>401 and time<501):\n # orderprice = 150\n # else:\n # orderprice = 100\n orderqty = random.randint(1,max_qty)\n # order = Order(tname, ordertype, orderstyle, orderprice, orderqty, issuetime, None, oid)\n order = Assignment(\"CUS\", tname, ordertype, orderstyle, orderprice, orderqty, issuetime, None, oid)\n oid += 1\n new_pending.append(order)\n \n # supply side (sellers)\n issuetimes = getissuetimes(n_sellers, os['timemode'], os['interval'], shuffle_times, True)\n ordertype = 'Ask'\n orderstyle = 'LIM'\n (sched, mode, sched_end) = getschedmode(time, os['sup'])\n for t in range(n_sellers):\n issuetime = time + issuetimes[t]\n tname = 'S%02d' % t\n orderprice = getorderprice(t, sched_end, sched, n_sellers, mode, issuetime)\n # if time<101 or (time>201 and time <301) or (time>401 and time<501):\n # orderprice = 50\n # else:\n # orderprice = 20\n orderqty = random.randint(1, max_qty)\n # order = Order(tname, ordertype, orderstyle, orderprice, orderqty, issuetime, None, oid)\n order = Assignment(\"CUS\", tname, ordertype, orderstyle, orderprice, orderqty, issuetime, None, 
oid)\n oid += 1\n new_pending.append(order)\n else:\n # there are pending future orders: issue any whose timestamp is in the past\n new_pending = []\n for order in pending:\n if order.time < time:\n # this order should have been issued by now\n # issue it to the trader\n tname = order.trad_id\n response = traders[tname].add_cust_order(order, verbose)\n if verbose: print('Customer order: %s %s' % (response, order))\n if response == 'LOB_Cancel' :\n cancellations.append(tname)\n if verbose: print('Cancellations: %s' % (cancellations))\n # and then don't add it to new_pending (i.e., delete it)\n else:\n # this order stays on the pending list\n new_pending.append(order)\n return [new_pending, cancellations, oid]\n\n\n\n# one session in the market\ndef market_session(sess_id, starttime, endtime, trader_spec, order_schedule, summaryfile, tapedumpfile, blotterdumpfile,\n dump_each_trade, verbose):\n\n n_exchanges = 1\n\n tape_depth = 5 # number of most-recent items from tail of tape to be published at any one time\n\n verbosity = False\n\n verbose = verbosity # main loop verbosity\n orders_verbose = verbosity\n lob_verbose = False\n process_verbose = False\n respond_verbose = False\n bookkeep_verbose = False\n\n # fname = 'prices' + sess_id +'.csv'\n # prices_data_file = open(fname, 'w')\n\n # initialise the exchanges\n exchanges = []\n for e in range(n_exchanges):\n eid = \"Exch%d\" % e\n exch = Exchange(eid)\n exchanges.append(exch)\n if verbose: print('Exchange[%d] =%s' % (e, str(exchanges[e])))\n\n # create a bunch of traders\n traders = {}\n trader_stats = populate_market(trader_spec, traders, True, verbose)\n\n\n # print 'describe traders:'\n # for tid in traders:\n # print 'trader.ttype: %s , trader.tid: %s' %(traders[tid].ttype,tid)\n\n\n # timestep set so that can process all traders in one second\n # NB minimum inter-arrival time of customer orders may be much less than this!!\n timestep = 1.0 / float(trader_stats['n_buyers'] + trader_stats['n_sellers'])\n \n 
duration = float(endtime - starttime)\n\n last_update = -1.0\n\n time = starttime\n\n next_order_id = 0\n\n pending_cust_orders = []\n\n if verbose: print('\\n%s; ' % (sess_id))\n\n tid = None\n\n while time < endtime:\n\n # how much time left, as a percentage?\n time_left = (endtime - time) / duration\n if verbose: print('\\n\\n%s; t=%08.2f (percent remaining: %4.1f/100) ' % (sess_id, time, time_left*100))\n\n trade = None\n\n # get any new assignments (customer orders) for traders to execute\n # and also any customer orders that require previous orders to be killed\n [pending_cust_orders, kills, noid] = customer_orders(time, last_update, traders, trader_stats,\n order_schedule, pending_cust_orders, next_order_id, orders_verbose)\n\n next_order_id = noid\n\n if verbose:\n print('t:%f, noid=%d, pending_cust_orders:' % (time, noid))\n for order in pending_cust_orders: print('%s; ' % str(order))\n\n # if any newly-issued customer orders mean quotes on the LOB need to be cancelled, kill them\n if len(kills) > 0:\n if verbose: print('Kills: %s' % (kills))\n for kill in kills:\n # if verbose: print('lastquote=%s' % traders[kill].lastquote)\n if traders[kill].lastquote != None :\n if verbose: print('Killing order %s' % (str(traders[kill].lastquote)))\n\n can_order = traders[kill].lastquote\n can_order.ostyle = \"CAN\"\n exch_response = exchanges[0].process_order(time, can_order, process_verbose)\n exch_msg = exch_response['trader_msgs']\n # do the necessary book-keeping\n # NB this assumes CAN results in a single message back from the exchange\n traders[kill].bookkeep(exch_msg[0], time, bookkeep_verbose)\n\n for t in traders:\n if len(traders[t].orders) > 0:\n # print(\"Tyme=%5.2d TID=%s Orders[0]=%s\" % (time, traders[t].tid, traders[t].orders[0]))\n dummy = 0 # NOP\n\n # get public lob data from each exchange\n lobs = []\n for e in range(n_exchanges):\n exch = exchanges[e]\n lob = exch.publish_lob(time, tape_depth, lob_verbose)\n # if verbose: print ('Exchange %d, 
Published LOB=%s' % (e, str(lob)))\n\n lobs.append(lob)\n\n\n # quantity-spike injection\n # this next bit is a KLUDGE that is VERY FRAGILE and has lots of ARBITRARY CONSTANTS in it :-(\n # it is introduced for George Church's project\n # to edit this you have to know how many traders there are (specified in main loop)\n # and you have to know the details of the supply and demand curves too (again, spec in main loop)\n # before public release of this code, tidy it up and parameterise it nicely\n # triggertime = 20\n # replenish_period = 20\n # highest_buyer_index = 10 # this buyer has the highest limit price\n # highest_seller_index = 20\n # big_qty = 222\n #\n # if time > (triggertime - 3*timestep) and ((time+3*timestep) % replenish_period) <= (timestep):\n # # sys.exit('Bailing at injection trigger, time = %f' % time)\n # print ('time: %f')%(time)\n # # here we inject big quantities nto both buyer and seller sides... hopefully the injected traders will do a deal\n # pending_cust_orders[0].qty = big_qty\n #\n #\n # # pending_cust_orders[highest_seller_index-1].qty = big_qty\n #\n # if verbose: print ('t:%f SPIKE INJECTION (Post) Exchange %d, Published LOB=%s' % (time, e, str(lob)))\n #\n # print('t:%f, Spike Injection: , microp=%s, pending_cust_orders:' % (time, lob['microprice']) )\n # for order in pending_cust_orders: print('%s; ' % str(order))\n\n triggertime = 100\n replenish_period = 100\n highest_buyer_index = 10 # this buyer has the highest limit price\n highest_seller_index = 20\n big_qty = 200\n if time > (triggertime - 3*timestep) and ((time+3*timestep) % replenish_period) <= (2 * timestep):\n # sys.exit('Bailing at injection trigger, time = %f' % time)\n ##print \"inject at\", time\n for assigment in pending_cust_orders:\n if traders[assigment.trad_id].ttype == 'AAAA':\n assigment.qty = big_qty\n # print 'block order comes in'\n # print 'trad_id: %s, price: %i qty: %i , time : %i' %(assigment.trad_id,assigment.price,assigment.qty,assigment.time)\n\n # 
get a quote (or None) from a randomly chosen trader\n\n # first randomly select a trader id\n old_tid = tid\n while tid == old_tid:\n tid = list(traders.keys())[random.randint(0, len(traders) - 1)]\n\n # currently, all quotes/orders are issued only to the single exchange at exchanges[0]\n # it is that exchange's responsibility to then deal with Order Protection / trade-through (Reg NMS Rule611)\n # i.e. the exchange logic could/should be extended to check the best LOB price of each other exchange\n # that is yet to be implemented here\n # if((time >= replenish_period and time % replenish_period <= 0.001)):\n # print 'time: %f' %(time)\n # tid = 'B00'\n # order = traders[tid].getorder(time, time_left, lobs[0], verbose)\n # print str(order);\n # print '11111111111111111111111'\n #\n # else:\n # order = traders[tid].getorder(time, time_left, lobs[0], verbose)\n\n # if((time >= replenish_period and time % replenish_period <= 0.05)):\n # print 'time: %f' %(time)\n # tid = 'B00'\n # order = traders[tid].getorder(time, time_left, lobs[0], verbose)\n # print str(order);\n # print '11111111111111111111111'\n #\n # else:\n # order = traders[tid].getorder(time, time_left, lobs[0], verbose)\n\n order = traders[tid].getorder(time, time_left, lobs[0], verbose)\n\n\n\n if order != None:\n # print ''\n # print ''\n # print ''\n # print('Trader Order: %s' % str(order))\n\n order.myref = traders[tid].orders[0].assignmentid # attach customer order ID to this exchange order\n if verbose: print('Order with myref=%s' % order.myref)\n\n # Sanity check: catch bad traders here\n traderprice = traders[tid].orders[0].price\n if order.otype == 'Ask' and order.price < traderprice: sys.exit('Bad ask: Trader.price %s, Quote: %s' % (traderprice,order))\n if order.otype == 'Bid' and order.price > traderprice: sys.exit('Bad bid: Trader.price %s, Quote: %s' % (traderprice,order))\n\n\n # how many quotes does this trader already have sat on an exchange?\n\n if len(traders[tid].quotes) >= 
traders[tid].max_quotes :\n # need to clear a space on the trader's list of quotes, by deleting one\n # new quote replaces trader's oldest previous quote\n # bit of a kludge -- just deletes oldest quote, which is at head of list\n # THIS SHOULD BE IN TRADER NOT IN MAIN LOOP?? TODO\n can_order = traders[tid].quotes[0]\n if verbose: print('> can_order %s' % str(can_order))\n can_order.ostyle = \"CAN\"\n if verbose: print('> can_order %s' % str(can_order))\n\n # send cancellation to exchange\n exch_response = exchanges[0].process_order(time, can_order, process_verbose)\n exch_msg = exch_response['trader_msgs']\n tape_sum = exch_response['tape_summary']\n\n if verbose:\n print('>Exchanges[0]ProcessOrder: tradernquotes=%d, quotes=[' % len(traders[tid].quotes))\n for q in traders[tid].quotes: print('%s' % str(q))\n print(']')\n for t in traders:\n if len(traders[t].orders) > 0:\n # print(\">Exchanges[0]ProcessOrder: Tyme=%5.2d TID=%s Orders[0]=%s\" % (time, traders[t].tid, traders[t].orders[0]))\n NOP = 0\n if len(traders[t].quotes) > 0:\n # print(\">Exchanges[0]ProcessOrder: Tyme=%5.2d TID=%s Quotes[0]=%s\" % (time, traders[t].tid, traders[t].quotes[0]))\n NOP = 0\n\n # do the necessary book-keeping\n # NB this assumes CAN results in a single message back from the exchange\n traders[tid].bookkeep(exch_msg[0], time, bookkeep_verbose)\n\n if verbose:\n # print('post-check: tradernquotes=%d, quotes=[' % len(traders[tid].quotes))\n for q in traders[tid].quotes: print('%s' % str(q))\n print(']')\n for t in traders:\n if len(traders[t].orders) > 0:\n # print(\"PostCheck Tyme=%5.2d TID=%s Orders[0]=%s\" % (time, traders[t].tid, traders[t].orders[0]))\n if len(traders[t].quotes) > 0:\n # print(\"PostCheck Tyme=%5.2d TID=%s Quotes[0]=%s\" % (time, traders[t].tid, traders[t].quotes[0]))\n NOP = 0\n\n if len(traders[t].orders) > 0 and traders[t].orders[0].astyle == \"CAN\":\n sys.stdout.flush()\n sys.exit(\"CAN error\")\n\n\n # add order to list of live orders issued by this 
trader\n traders[tid].quotes.append(order)\n\n if verbose: print('Trader %s quotes[-1]: %s' % (tid, traders[tid].quotes[-1]))\n\n # send this order to exchange and receive response\n exch_response = exchanges[0].process_order(time, order, process_verbose)\n exch_msgs = exch_response['trader_msgs']\n tape_sum = exch_response['tape_summary']\n\n # because the order just processed might have changed things, now go through each\n # order resting at the exchange and see if it can now be processed\n # applies to AON, ICE, OSO, and OCO\n\n\n\n\n\n #print('Exch_Msgs: ')\n # if exch_msgs == None: pass\n # else:\n # for msg in exch_msgs:\n # print('Msg=%s' % msg)\n\n if exch_msgs != None and len(exch_msgs) > 0:\n # messages to process\n for msg in exch_msgs:\n if verbose: print('Message: %s' % msg)\n traders[msg.tid].bookkeep(msg, time, bookkeep_verbose)\n\n\n # traders respond to whatever happened\n # needs to be updated for multiple exchanges\n lob = exchanges[0].publish_lob(time, tape_depth, lob_verbose)\n\n s = '%6.2f, ' % time\n for t in traders:\n # NB respond just updates trader's internal variables\n # doesn't alter the LOB, so processing each trader in\n # sequence (rather than random/shuffle) isn't a problem\n traders[t].respond(time, lob, tape_sum, respond_verbose)\n\n # if traders[t].ttype == 'ISHV':\n # print('%6.2f, ISHV Print, %s' % (time, str(traders[t])))\n # lq = traders[t].lastquote\n # print('lq = %s' % lq)\n # if lq != None :\n # price = lq.price\n # else: price = None\n # if price == None: s = s + '-1, '\n # else: s = s + '%s, ' % price\n # prices_data_file.write('%s\\n' % s)\n\n # if (lob['microprice'] == None or lob['midprice'] == None):\n # print 'microprice is none'\n # print 'midprice is none '\n # print 'microprice: '\n # print lob['microprice']\n # print 'midprice: '\n # print lob['midprice']\n # print 'bid anon:'\n # print lob['bids']['lob']\n # print 'ask anon:'\n # print lob['asks']['lob']\n\n time = time + timestep\n\n\n # end of an 
experiment -- dump the tape\n exchanges[0].dump_tape(sess_id, tapedumpfile, 'keep',traders)\n\n\n # traders dump their blotters\n for t in traders:\n tid = traders[t].tid\n ttype = traders[t].ttype\n balance = traders[t].balance\n blot = traders[t].blotter\n blot_len = len(blot)\n # build csv string for all events in blotter\n csv = ''\n estr = \"TODO \"\n for e in blot:\n # print(blot)\n # estr = '%s, %s, %s, %s, %s, %s, ' % (e['type'], e['time'], e['price'], e['qty'], e['party1'], e['party2'])\n csv = csv + estr\n blotterdumpfile.write('%s, %s, %s, %s, %s, %s\\n' % (sess_id, tid, ttype, balance, blot_len, csv))\n\n # write summary trade_stats for this experiment (end-of-session summary ONLY)\n for e in range(n_exchanges):\n trade_stats(sess_id, traders, summaryfile, time, exchanges[e].publish_lob(time, None, lob_verbose))\n\n\n\n#############################\n\n# # Below here is where we set up and run a series of experiments\n\n\nif __name__ == \"__main__\":\n\n\n start_time = 0.0\n end_time = 200.0\n duration = end_time - start_time\n\n range1 = (50,50)\n range2 = (20,20)\n supply_schedule = [{'from': 0, 'to': end_time, 'ranges': [range1], 'stepmode': 'fixed'}]\n # supply_schedule = [{'from': 0, 'to': 100, 'ranges': [(50,50)], 'stepmode': 'fixed'},\n # {'from': 100, 'to': 200, 'ranges': [(50,150)], 'stepmode': 'fixed'},\n # {'from': 200, 'to': 300, 'ranges': [(50,150)], 'stepmode': 'fixed'},\n # {'from': 300, 'to': 500, 'ranges': [(50,50)], 'stepmode': 'fixed'}\n #\n # ]\n\n range3 = (150, 150)\n range4 = (100, 100)\n demand_schedule = [{'from': 0, 'to': end_time, 'ranges': [range3], 'stepmode': 'fixed'}]\n # demand_schedule = [{'from': 0, 'to': 100, 'ranges': [(150,150)], 'stepmode': 'fixed'},\n # {'from': 100, 'to': 200, 'ranges': [(50,150)], 'stepmode': 'fixed'},\n # {'from': 200, 'to': 300, 'ranges': [(150,150)], 'stepmode': 'fixed'},\n # {'from': 300, 'to': 500, 'ranges': [(50,150)], 'stepmode': 'fixed'}\n #\n # ]\n\n order_sched = {'sup': supply_schedule, 
'dem': demand_schedule,\n 'interval': 100,\n 'timemode': 'periodic'}\n\n ## 'AAAA' holds the block order\n buyers_spec = [('AAA',10),('AAAA',10)]\n sellers_spec = [ ('AA',10),('IAA_MLOFI',10)]\n # buyers_spec = [('BID_IGDX_3', 10), ('BID_IZIP_3', 10), ('BID_IAA_3', 10),('BID_ISHV_3', 10), ('AAAA', 10)]\n # sellers_spec = [('BID_IGDX_3', 10), ('BID_IZIP_3', 10), ('BID_IAA_3', 10),('BID_ISHV_3', 10), ('AAAA', 10)]\n traders_spec = {'sellers':sellers_spec, 'buyers':buyers_spec}\n\n sys.stdout.flush()\n\n fname = 'Mybalances.csv'\n summary_data_file = open(fname, 'w')\n\n fname = 'Mytapes.csv'\n tape_data_file = open(fname, 'w')\n\n fname = 'Myblotters.csv'\n blotter_data_file = open(fname, 'w')\n\n for session in range(100):\n sess_id = 'Test%02d' % session\n print('Session %s; ' % sess_id)\n\n\n market_session(sess_id, start_time, end_time, traders_spec, order_sched, summary_data_file, tape_data_file, blotter_data_file, True, False)\n\n summary_data_file.close()\n tape_data_file.close()\n blotter_data_file.close()\n\n print('\\n Experiment Finished')\n" }, { "alpha_fraction": 0.5574297308921814, "alphanum_fraction": 0.6104417443275452, "avg_line_length": 22.739999771118164, "blob_id": "c79fd333e4527a13be14e42bab431f561e6db9ab", "content_id": "65fa96881fe932fb9208e4cdd21b41255cce5be4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1245, "license_type": "permissive", "max_line_length": 103, "num_lines": 50, "path": "/ZhenZhang/source/dataAnalysis/hypothesisTest.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\n\r\n\r\nimport scipy.stats as stats\r\nimport csv\r\ncsv_file = open(\"../Mybalances.csv\",\"r\")\r\ncsv_reader = csv.reader(csv_file);\r\n\r\ny1 = []\r\ny2 = []\r\ny3 = []\r\ny4 = []\r\nname1 = None\r\nname2 = None\r\nname3 = None\r\nname4 = None\r\n\r\n\r\ncount = 0\r\nfor item in csv_reader:\r\n y1.append(float(item[5]))\r\n y2.append(float(item[17]))\r\n 
# y3.append(int(float(item[13])))\r\n # y4.append(int(float(item[17])))\r\n name1 = item[2]\r\n name2 = item[14]\r\n # name3 = item[10]\r\n # name4 = item[14]\r\n # print '%s,%s'%(item[5],item[9])\r\n count += 1\r\n\r\n\r\nu_statistic, pVal = stats.mannwhitneyu(y1, y2,alternative='less')\r\n\r\nprint \"u_statistic is %f\"%u_statistic;\r\nprint \"p value is %f\"%pVal;\r\n\r\n\r\nimport numpy as np\r\n#create 95% confidence interval for population mean weight\r\nprint np.mean(y2)\r\nprint stats.t.interval(alpha=0.95, df=len(y2)-1, loc=np.mean(y2), scale=stats.sem(y2))[1] - np.mean(y2)\r\nprint \"\"\r\nprint np.mean(y1)\r\nprint stats.t.interval(alpha=0.95, df=len(y1)-1, loc=np.mean(y1), scale=stats.sem(y1))[1] - np.mean(y1)\r\nprint \"\"\r\n\r\ndf = []\r\n\r\nfor index in range(len(y1)):\r\n df.append(y2[index]-y1[index])\r\nprint np.mean(df)\r\nprint stats.t.interval(alpha=0.95, df=len(df)-1, loc=np.mean(df), scale=stats.sem(df))[1] - np.mean(df)\r\n\r\n" }, { "alpha_fraction": 0.5609151124954224, "alphanum_fraction": 0.5631201863288879, "avg_line_length": 60.49152374267578, "blob_id": "5aa7ba99b9a15b8266c53c944c29927cbea180c0", "content_id": "be2db3181d8c71f0868aec9fc9d235c9aa6ec446", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3628, "license_type": "permissive", "max_line_length": 140, "num_lines": 59, "path": "/ZhenZhang/source/BSE2_msg_classes.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "# Assignment:\n# The details of a customer's order/request, assigned to a trader\nclass Assignment:\n\n def __init__(self, customer_id, trader_id, otype, ostyle, price, qty, time, endtime, assignmentid):\n self.cust_id = customer_id # customer identifier\n self.trad_id = trader_id # trader this customer order is assigned to\n self.atype = otype # order type (buy or sell)\n self.astyle = ostyle # order style: MKT, LIM, etc\n self.price = price # price\n self.qty = qty # 
quantity\n self.time = time # timestamp: time at which customer issued order\n self.endtime = endtime # time at which order should expire (e.g. for GFD and AON orders)\n self.assignmentid = assignmentid # i.d. (unique identifier for each assignment)\n\n\n def __str__(self):\n return '[%s %s %s %s P=%03d Q=%s T=%5.2f AID:%d]' % \\\n (self.cust_id, self.trad_id, self.atype, self.astyle, self.price, self.qty, self.time, self.assignmentid)\n\n\n# Order/quote, submitted by trader to exchange\n# has a trader id, a type (buy/sell), a style (LIM, MKT, etc), a price,\n# a quantity, a timestamp, and a unique i.d.\n# The order-style may require additional parameters which are bundled into style_params (=None if not)\nclass Order:\n\n def __init__(self, trader_id, otype, ostyle, price, qty, time, endtime, orderid):\n self.tid = trader_id # trader i.d.\n self.otype = otype # order type (bid or ask -- what side of LOB is it for)\n self.ostyle = ostyle # order style: MKT, LIM, etc\n self.price = price # price\n self.qty = qty # quantity\n self.time = time # timestamp\n self.endtime = endtime # time at which exchange deletes order (e.g. for GFD and AON orders)\n self.orderid = orderid # quote i.d. (unique to each quote, assigned by exchange)\n self.myref = None # trader's own reference for this order -- used to link back to assignment-ID\n self.styleparams = None # style parameters -- initially null, filled in later\n\n def __str__(self):\n return '[%s %s %s P=%03d Q=%s T=%5.2f OID:%d Params=%s MyRef=%s]' % \\\n (self.tid, self.otype, self.ostyle, self.price, self.qty, self.time, self.orderid, str(self.styleparams), self.myref)\n\n\n# structure of the messages that the exchange sends back to the traders after processing an order\nclass Exch_msg:\n\n def __init__(self, trader_id, order_id, eventtype, transactions, revised_order, fee, balance):\n self.tid = trader_id # trader i.d.\n self.oid = order_id # order i.d.\n self.event = eventtype # what happened? 
(ACKnowledged|PARTial|FILLed|FAILure)\n self.trns = transactions # list of transactions (price, qty, etc) details for this order\n self.revo = revised_order # revised order as created by exchange matching engine\n self.fee = fee # exchange fee\n self.balance = balance # exchange's record of this trader's balance\n\n def __str__(self):\n return 'TID:%s OID:%s Event:%s Trns:%s RevO:%s Fee:%d Bal:%d' % \\\n (self.tid, self.oid, self.event, str(self.trns), str(self.revo), self.fee, self.balance)\n" }, { "alpha_fraction": 0.547066867351532, "alphanum_fraction": 0.6023192405700684, "avg_line_length": 21.934425354003906, "blob_id": "b96c014d811d05c7bbfe49b8767a1ffe5240f62f", "content_id": "270b6a23fa700e71c54aa2110fc048dab40188eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1466, "license_type": "permissive", "max_line_length": 102, "num_lines": 61, "path": "/ZhenZhang/source/dataAnalysis/matplotlib4.py", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "\r\n\r\nfrom matplotlib import pyplot as plot\r\nimport csv\r\nimport random\r\n\r\n\r\n\r\n\r\n\r\n\r\ncsv_file = open(\"../Mybalances.csv\",\"r\")\r\ncsv_reader = csv.reader(csv_file);\r\n\r\ny1 = []\r\ny2 = []\r\ny3 = []\r\ny4 = []\r\nname1 = None\r\nname2 = None\r\nname3 = None\r\nname4 = None\r\n\r\ncy1 = 0;\r\ncy2 = 0;\r\ncount = 0\r\nfor item in csv_reader:\r\n y1.append(int(float(item[5])))\r\n y2.append(int(float(item[17])))\r\n cy1 += int(float(item[5]))\r\n cy2 += int(float(item[17]))\r\n # y3.append(int(float(item[13])))\r\n # y4.append(int(float(item[17])))\r\n name1 = item[2]\r\n name2 = item[14]\r\n # name3 = item[10]\r\n # name4 = item[14]\r\n # print '%s,%s'%(item[5],item[9])\r\n count += 1\r\nx = range(1,count+1)\r\nfig, ax = plot.subplots()\r\n\r\ncost1 = cy1/count\r\ncost1_list = [cost1 for i in range(1,count+1)]\r\n\r\ncost2 = cy2/count\r\ncost2_list = [cost2 for i in 
range(1,count+1)]\r\n\r\nax.plot(x,y1,label=\"AA\")\r\nax.plot(x,y2,label=\"IAA\")\r\nax.plot(x,cost1_list,linestyle=\"dashed\", label= \"AA's average profit = \"+str(float((cy1+0.0)/count)))\r\nax.plot(x,cost2_list,linestyle=\"dashed\", label= \"IAA's average profit = \"+str(float((cy2+0.0)/count)))\r\n# ax.plot(x,y3,label=name3)\r\n# ax.plot(x,y4,label=name4)\r\n\r\n# xticks_label = [i*5 for i in range(1,21)]\r\n# plot.xticks( xticks_label)\r\n# yticks_label = [i*5 for i in range(10,30)]\r\n# plot.yticks( yticks_label)\r\nax.set_xlabel('trading day')\r\nax.set_ylabel('total profit in each trading day')\r\nax.legend()\r\nplot.savefig(\"./balance4.png\")\r\n\r\n" }, { "alpha_fraction": 0.720588207244873, "alphanum_fraction": 0.7271241545677185, "avg_line_length": 29.549999237060547, "blob_id": "9824c71b992001ad7c0f0f549dff178086e1eff9", "content_id": "eeb0dd587e3be5266cd3618ece25e5cc6ff7b6fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 612, "license_type": "permissive", "max_line_length": 161, "num_lines": 20, "path": "/ZhenZhang/README.md", "repo_name": "davecliff/BristolStockExchange", "src_encoding": "UTF-8", "text": "This folder holds the sourcecode developed by Zhen Zhang as part of his MSc project, supervised by Dave Cliff, at the University of Bristol, submitted in September 2020.\n\n\n\nThe main contribution is to adapt the multi-level order flow imbalance (MLOFI) to IAA.\n\nIt includes two modules:\n\n+ \"impact-sensitive\" module\n+ \"evaluation\" module\n\n\n\nIn [IAA_MLOFI.py](./source/IAA_MLOFI.py), it is the IAA with \"impact-sensitive\" module.\n\nIn [IAA_NEW.py](./source/IAA_NEW.py), it is the IAA with two modules.\n\nIn [IZIP_MLOFI.py](./source/IZIP_MLOFI.py), it is the ZZIZIP.\n\nIn [ZZISHV.py](./source/ZZISHV.py), it is the ZZISHV.\n\n" } ]
17
Arrokent/DeepNeuralNetwork
https://github.com/Arrokent/DeepNeuralNetwork
e140390c3966ad898f35ea8f817ae3e5a38af21d
e78a8a536e00562dada5bffd5a20928ba6875010
1a5d9723a47ea008e8a7f341637586b1bef47840
refs/heads/master
2023-01-21T12:15:27.132398
2020-12-01T09:21:24
2020-12-01T09:21:24
317,487,938
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6217391490936279, "alphanum_fraction": 0.647826075553894, "avg_line_length": 13.4375, "blob_id": "56d48dee7be8e55e25d41bebc040215a50d6d67a", "content_id": "2a9381b3891c1a55104c2bc17aaa887cb937ea7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 362, "license_type": "no_license", "max_line_length": 30, "num_lines": 16, "path": "/readme.md", "repo_name": "Arrokent/DeepNeuralNetwork", "src_encoding": "UTF-8", "text": "# ็Œซๅƒ่ฏ†ๅˆซ\n## ๅ…ณ้”ฎๅญ—\nๅ›พๅƒ่ฏ†ๅˆซ๏ผŒpython๏ผŒpytorch๏ผŒๅดๆฉ่พพ๏ผŒAndrew\n## ้กน็›ฎ็ป“ๆž„\n1. dnn.app_utils_v2 \n ็ฅž็ป็ฝ‘็ปœ็”จๅˆฐ็š„ๅ‡ฝๆ•ฐๅทฅๅ…ท\n2. dnn_app.py \n ้กน็›ฎ่ฟ่กŒไธปๅ‡ฝๆ•ฐ๏ผŒไธปๆ–‡ไปถ\n3. test_catvnoncat \n ๆต‹่ฏ•้›†\n4. train_catvnoncat \n ่ฎญ็ปƒ้›†\n4. my_image\n ้ข„ๆต‹็”จๅ›พ็‰‡\n### ้กน็›ฎ่ฟ่กŒ\n็‚นๅ‡ปdnn_app.py๏ผŒ็›ดๆŽฅ่ฟ่กŒ" }, { "alpha_fraction": 0.6275528073310852, "alphanum_fraction": 0.6500518918037415, "avg_line_length": 30.40217399597168, "blob_id": "771ba8d9f26f3fc90ed9dc0d10be1d35c59355ad", "content_id": "ff581fea588edaaa7a6d4233cd820ebc2114c7d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2889, "license_type": "no_license", "max_line_length": 114, "num_lines": 92, "path": "/dnn_app.py", "repo_name": "Arrokent/DeepNeuralNetwork", "src_encoding": "UTF-8", "text": "from PIL import Image\nfrom dnn_app_utils_v2 import *\n\n# set default size of plots\nplt.rcParams['figure.figsize'] = (5.0, 4.0)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# make the random matrix same every time\nnp.random.seed(1)\n\ntrain_x_orig, train_y, test_x_orig, test_y, classes = load_data()\n\n# Reshape the training and test examples\ntrain_x_flatten = train_x_orig.reshape(train_x_orig.shape[0],\n -1).T # The \"-1\" makes reshape flatten the remaining dimensions\ntest_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T\n\n# Standardize data to 
have feature values between 0 and 1.\ntrain_x = train_x_flatten / 255.\ntest_x = test_x_flatten / 255.\n\nprint(\"train_x's shape: \" + str(train_x.shape))\nprint(\"test_x's shape: \" + str(test_x.shape))\n\n# 5-layer model\nlayers_dims = [12288, 20, 7, 5, 1]\n\n\ndef L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False): # lr was 0.009\n\n np.random.seed(1)\n costs = [] # keep track of cost\n\n # Parameters initialization.\n parameters = initialize_parameters_deep(layers_dims)\n\n # Loop (gradient descent)\n for i in range(0, num_iterations):\n\n # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.\n AL, caches = L_model_forward(X, parameters)\n\n # Compute cost.\n cost = compute_cost(AL, Y)\n\n # Backward propagation.\n grads = L_model_backward(AL, Y, caches)\n\n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n\n # Print the cost every 10 training example\n if print_cost and i % 10 == 0:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n if print_cost and i % 10 == 0:\n costs.append(cost)\n\n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n return parameters\n\n\n# run the training\nparameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=2500, print_cost=True)\n\n# get the training set accuracy\npred_train = predict(train_x, train_y, parameters)\n\n# get the test set accuracy\npred_test = predict(test_x, test_y, parameters)\n\n# this is the name of the image file\nmy_image = \"my_image.jpg\"\n# the true class of your image (1 -> cat, 0 -> non-cat)\nmy_label_y = [1]\n\nnum_px = train_x_orig.shape[1]\n\nfname = my_image\nimage = np.array(plt.imread(fname))\nmy_image = np.array(Image.fromarray(image).resize(size=(num_px, num_px))).reshape((num_px * num_px * 3, 1))\nmy_predicted_image = predict(my_image, my_label_y, 
parameters)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your L-layer model predicts a \\\"\" + classes[\n int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")\n" } ]
2
sfujiwara/tfmodel
https://github.com/sfujiwara/tfmodel
8ea05f1eacdf66ef74ddfc4204f3cc085828b7a9
3d92cea19b955649d4c370efa40b62551e1f31d7
ddf2e39af493d5401c5d2c4a31ea4aa53a30fe1f
refs/heads/master
2021-01-13T04:19:07.170181
2019-04-04T01:07:31
2019-04-04T01:07:31
77,469,114
10
2
null
2016-12-27T16:37:58
2017-10-29T14:04:31
2017-11-08T08:06:54
Python
[ { "alpha_fraction": 0.704952597618103, "alphanum_fraction": 0.7207586765289307, "avg_line_length": 26.941177368164062, "blob_id": "b3708a545c77d2fe8fdbb8c544fb23b89d33553a", "content_id": "4fab63a5fea7f0475866eb3618d83aaf7cee456e", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 949, "license_type": "permissive", "max_line_length": 196, "num_lines": 34, "path": "/README_VGG16.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# VGG 16\n\nSee [here](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) for details of VGG 16.\n\n## Basic Usage\n\n```python\nimport tfmodel\n\ndef train_input_fn():\n # Implement input pipeline for training data\n # x must be a dict or a Tensor with shape [batch_size, height, width, 3]\n # y must be a one-hot Tensor with shape [batch_size, n_classes]\n return {\"images\": xs}, ys\n\nclf = tfmodel.estimator.VGG16Classifier(\n fc_units=[],\n n_classes=2,\n model_dir=\"outputs\",\n pretrained_checkpoint_dir=\"models\"\n)\n\nclf.train(input_fn=train_input_fn, steps=10000)\n```\n\nIf `pretrained_checkpoint_dir` is specified, pre-trained checkpoint will be automatically downloaded from [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models).\n\n## Practical Sample Code\n\nTODO\n\n## License\n\nThe pre-trained model is released under [Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)." 
}, { "alpha_fraction": 0.5748031735420227, "alphanum_fraction": 0.5984252095222473, "avg_line_length": 14.875, "blob_id": "33cdd269c19b0f3710db9aba46b2429aa403d503", "content_id": "abb8ddf6e895d78de0b5e51d2f603cdcaaddffb5", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "permissive", "max_line_length": 23, "num_lines": 8, "path": "/tfmodel/__init__.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "from . import vgg\nfrom . import estimator\nfrom . import util\n\n\n__author__ = \"Shuhei Fujiwara\"\n__version__ = \"0.1.0\"\n__license__ = \"MIT\"\n" }, { "alpha_fraction": 0.5577319860458374, "alphanum_fraction": 0.592783510684967, "avg_line_length": 25.216217041015625, "blob_id": "c935ccce2dcf03e5cb1c95eb350e4425f2c7017c", "content_id": "235064e0eb1e9aa5f70f251f94d9f07d4c914243", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 970, "license_type": "permissive", "max_line_length": 112, "num_lines": 37, "path": "/examples/style-transfer/README.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# Neural Algorithm of Artistic Style with TensorFlow\n\nAn implementation of \"[A Neural Algorithm of Artistic Style](https://arxiv.org/abs/1508.06576)\" with TensorFlow.\n\n<p align = 'left'>\n<img src=\"img/contents/tensorflow.jpg\" width=181>\n<img src=\"img/styles/chojugiga.jpg\" width=181>\n<img src=\"img/results/tf_x_chojugiga.jpg\" width=181>\n</p>\n\n## Requirements\n\n* TensorFlow\n* Pillow\n* SciPy\n\n## How to Run\n\nDownload the repository as below:\n\n```\ngit clone https://github.com/sfujiwara/tfmodel.git\ncd examples/style-transfer\n```\n\nRun Python script as below:\n\n```\npython style_transfer.py --style img/styles/chojugiga.jpg \\\n --content img/contents/tensorflow.jpg \\\n --tv_weight 0.0 \\\n --content_weight 0.95 \\\n --style_weight=0.05 
\\\n --summary_iterations 20 \\\n --iterations 1500 \\\n --learning_rate 1.0\n```\n" }, { "alpha_fraction": 0.5851021409034729, "alphanum_fraction": 0.5963155627250671, "avg_line_length": 34.41843795776367, "blob_id": "9550b121615929abcda259417511589e1329fefd", "content_id": "69c81a03975f3e0e6a73496bef14eee2151843a5", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4994, "license_type": "permissive", "max_line_length": 108, "num_lines": 141, "path": "/tfmodel/estimator/vgg_estimator.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "import os\nimport tensorflow as tf\nimport tfmodel\n\n\ndef metric_fn(labels, logits):\n n_classes = logits.shape[1].value\n n_classes = min(n_classes, 10)\n with tf.name_scope(\"metrics_accuracy\"):\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(labels=tf.argmax(labels, 1), predictions=tf.argmax(logits, 1)),\n \"mean_par_accuracy\": tf.metrics.mean_per_class_accuracy(\n labels=tf.argmax(labels, 1), predictions=tf.argmax(logits, 1), num_classes=n_classes\n ),\n }\n # Add recalls of each classes to eval metrics\n with tf.name_scope(\"metrics_recall\"):\n for k in [1, 3]:\n for i in range(n_classes):\n eval_metric_ops[\"recall_at_{}/class_{}\".format(k, i)] = tf.metrics.recall_at_k(\n labels=tf.argmax(labels, 1), predictions=logits, k=k, class_id=i\n )\n # Add precisions of each classes to eval metrics\n with tf.name_scope(\"metrics_precision\"):\n for k in [1]:\n for i in range(n_classes):\n eval_metric_ops[\"precision_at_{}/class_{}\".format(k, i)] = tf.metrics.sparse_precision_at_k(\n labels=tf.argmax(labels, 1), predictions=logits, k=k, class_id=i\n )\n return eval_metric_ops\n\n\ndef vgg16_model_fn(features, labels, mode, params, config=None):\n if isinstance(features, dict):\n xs = features[list(features.keys())[0]]\n else:\n xs = features\n tfmodel.vgg.build_vgg16_graph(xs, trainable=False, reuse=False)\n pool5 = 
tf.get_default_graph().get_tensor_by_name(\"vgg_16/pool5:0\")\n hidden = tf.layers.flatten(pool5)\n with tf.variable_scope(\"additional_layers\"):\n for i, n_unit in enumerate(params[\"fc_units\"]):\n hidden = tf.layers.dense(hidden, n_unit, activation=tf.nn.relu, name=\"fc{}\".format(i))\n logits = tf.layers.dense(hidden, params[\"n_classes\"], name=\"logits\")\n prob = tf.nn.softmax(logits)\n loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits, label_smoothing=1e-7)\n optim = params[\"optimizer\"]\n train_op = optim.minimize(loss=loss, global_step=tf.train.get_global_step())\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=prob,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=metric_fn(labels=labels, logits=logits)\n )\n return estimator_spec\n\n\nclass VGG16Classifier(tf.estimator.Estimator):\n\n def __init__(\n self,\n fc_units,\n n_classes,\n optimizer=tf.train.ProximalAdagradOptimizer(1e-2),\n model_dir=None,\n config=None,\n warm_start_from=None,\n ):\n params = {\n \"fc_units\": fc_units,\n \"n_classes\": n_classes,\n \"optimizer\": optimizer,\n }\n super(VGG16Classifier, self).__init__(\n model_fn=vgg16_model_fn,\n model_dir=model_dir,\n params=params,\n config=config,\n warm_start_from=warm_start_from,\n )\n\n\ndef vgg16_tpu_model_fn(features, labels, mode, params, config=None):\n if isinstance(features, dict):\n xs = features[list(features.keys())[0]]\n else:\n xs = features\n tfmodel.vgg.build_vgg16_graph(xs, trainable=False, reuse=False)\n pool5 = tf.get_default_graph().get_tensor_by_name(\"vgg_16/pool5:0\")\n hidden = tf.layers.flatten(pool5)\n with tf.variable_scope(\"additional_layers\"):\n for i, n_unit in enumerate(params[\"fc_units\"]):\n hidden = tf.layers.dense(hidden, n_unit, activation=tf.nn.relu, name=\"fc{}\".format(i))\n logits = tf.layers.dense(hidden, params[\"n_classes\"], name=\"logits\")\n prob = tf.nn.softmax(logits)\n loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, 
logits=logits, label_smoothing=1e-7)\n optim = params[\"optimizer\"]\n train_op = optim.minimize(loss=loss, global_step=tf.train.get_global_step())\n\n estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": prob},\n loss=loss,\n train_op=train_op,\n metric_fn=(metric_fn, [labels, logits])\n )\n return estimator_spec\n\n\nclass VGG16TPUClassifier(tf.contrib.tpu.TPUEstimator):\n\n def __init__(\n self,\n fc_units,\n n_classes,\n optimizer=tf.train.ProximalAdagradOptimizer(1e-2),\n model_dir=None,\n config=None,\n use_tpu=False,\n train_batch_size=32,\n ):\n if use_tpu:\n optimizer_ = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n else:\n optimizer_ = optimizer\n params = {\n \"fc_units\": fc_units,\n \"n_classes\": n_classes,\n \"optimizer\": optimizer_,\n }\n\n super(VGG16TPUClassifier, self).__init__(\n model_fn=vgg16_model_fn,\n model_dir=model_dir,\n params=params,\n config=config,\n train_batch_size=train_batch_size,\n use_tpu=use_tpu\n )\n" }, { "alpha_fraction": 0.8307692408561707, "alphanum_fraction": 0.8615384697914124, "avg_line_length": 42.33333206176758, "blob_id": "e82096745db3b9f422e5cc562c30b4554e94d531", "content_id": "4ea33470ef9ade0533592b6ee34b9279d572df85", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "permissive", "max_line_length": 45, "num_lines": 3, "path": "/tfmodel/estimator/__init__.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "from .vgg_estimator import VGG16Classifier\nfrom .vgg_estimator import VGG16TPUClassifier\nfrom .ocsvm_estimator import OneClassSVM\n" }, { "alpha_fraction": 0.6098577976226807, "alphanum_fraction": 0.637322187423706, "avg_line_length": 41.92631530761719, "blob_id": "4785ad9ecd65c3062225f6068342398a192ee317", "content_id": "cb1db85f51e802912e97a823b2e48c3a1a9ea97d", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 4078, "license_type": "permissive", "max_line_length": 97, "num_lines": 95, "path": "/tfmodel/util.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport hashlib\nimport os\nimport tarfile\nfrom six.moves import urllib\nimport tempfile\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nfrom . import vgg\n\n\ndef download_vgg16_checkpoint(\n dest_directory=os.path.join(os.environ.get(\"HOME\"), \".tfmodel\", \"models\")\n):\n data_url = \"http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz\"\n dest_file = os.path.join(dest_directory, \"vgg_16.ckpt\")\n if tf.gfile.Exists(dest_file) and _verify_vgg16_checkpoint_hash(dest_file):\n tf.logging.info(\"{} already exists\".format(dest_file))\n else:\n tarfile_path = os.path.join(tempfile.tempdir, os.path.basename(data_url))\n if tf.gfile.Exists(tarfile_path) and _verify_vgg16_tar_hash(tarfile_path):\n\n tf.logging.info(\"{} already exists\".format(tarfile_path))\n else:\n tf.logging.info(\"downloading vgg16 checkpoint from {}\".format(data_url))\n urllib.request.urlretrieve(data_url, filename=tarfile_path)\n tf.logging.info(\"extracting {}\".format(tarfile_path))\n x = tarfile.open(name=tarfile_path, mode=\"r\")\n fileobj = x.extractfile(x.getmembers()[0].name)\n tf.logging.info(\"saving vgg16 checkpoint to {}\".format(dest_file))\n tf.gfile.MakeDirs(dest_directory)\n with tf.gfile.Open(dest_file, \"w\") as f:\n f.write(fileobj.read())\n\n\ndef _verify_vgg16_checkpoint_hash(checkpoint_path):\n with tf.gfile.Open(checkpoint_path) as f:\n is_valid = hashlib.md5(f.read()).hexdigest() == \"c69996ee68fbd93d810407da7b3c0242\"\n return is_valid\n\n\ndef _verify_vgg16_tar_hash(tar_path):\n with tf.gfile.Open(tar_path) as f:\n is_valid = hashlib.md5(f.read()).hexdigest() == \"520bc6e4c73a89b5c0d8b9c4eaa8861f\"\n return is_valid\n\n\ndef _default_resize_image_fn(img):\n img = 
tf.image.resize_bicubic([img], [224, 224])[0]\n # img = tf.image.resize_bicubic([img], [170, 225])[0]\n # img = tf.image.resize_image_with_crop_or_pad(img, 224, 224)\n img.set_shape([224, 224, 3])\n img = tf.cast(img, dtype=tf.uint8)\n img = tf.cast(img, dtype=tf.float32)\n return img\n\n\ndef embed(input_exps, output_dir, resize_image_fn=_default_resize_image_fn):\n save_dir = os.path.join(os.environ.get(\"HOME\", \"\"), \".tfmodel\")\n metadata = [[\"file\", \"label\"]]\n images = []\n with tf.Graph().as_default() as g:\n for i, exp in enumerate(input_exps):\n file_list = tf.gfile.Glob(exp)\n for f in file_list:\n img = tf.image.decode_jpeg(tf.read_file(f), channels=3)\n img = resize_image_fn(img)\n metadata.append([f, str(i)])\n images.append(img)\n img_tensor = tf.stack(images)\n features = vgg.build_vgg16_graph(img_tensor=img_tensor, include_top=False)\n init_op = tf.global_variables_initializer()\n vgg16_saver = tf.train.Saver(tf.get_collection(vgg.VGG16_GRAPH_KEY))\n with tf.Session() as sess:\n sess.run(init_op)\n download_vgg16_checkpoint(save_dir)\n vgg16_saver.restore(sess, os.path.join(save_dir, \"vgg_16.ckpt\"))\n features_array = sess.run(features)\n with tf.Graph().as_default() as g:\n img_var = tf.Variable(features_array, name=\"images\")\n saver = tf.train.Saver(var_list=[img_var])\n init_op = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init_op)\n projector_config = projector.ProjectorConfig()\n embedding = projector_config.embeddings.add()\n embedding.tensor_name = img_var.name\n embedding.metadata_path = \"metadata.tsv\"\n summary_write = tf.summary.FileWriter(output_dir)\n projector.visualize_embeddings(summary_writer=summary_write, config=projector_config)\n saver.save(sess, os.path.join(output_dir, \"embeddings.ckpt\"))\n # Convert list to TSV\n metadata = \"\\n\".join([\"\\t\".join(i) for i in metadata])\n tf.gfile.Open(os.path.join(output_dir, \"metadata.tsv\"), \"w\").write(metadata)\n" }, { "alpha_fraction": 
0.5755208134651184, "alphanum_fraction": 0.6171875, "avg_line_length": 25.482759475708008, "blob_id": "9ae81eebb56905f3143b3fd7fd0fd435ee6b6bdd", "content_id": "d65bff1df6b2df8fd998e30fe3edc226f455171b", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "permissive", "max_line_length": 90, "num_lines": 29, "path": "/example_vgg16.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nimport tfmodel\n\n\ndef train_input_fn():\n x = {\"image\": tf.constant(np.ones([32, 224, 224, 3]), dtype=np.float32, name=\"image\")}\n y = tf.constant(np.zeros([32, 1000], dtype=np.float32), name=\"label\")\n return x, y\n\n\ndef main():\n warm_start_settings = tf.estimator.WarmStartSettings(\n ckpt_to_initialize_from=\"vgg_16.ckpt\",\n vars_to_warm_start=\"vgg_16*\",\n )\n clf = tfmodel.estimator.VGG16Classifier(\n fc_units=[],\n n_classes=1000,\n optimizer=tf.train.GradientDescentOptimizer(1e-2),\n model_dir=\"outputs\",\n config=None,\n warm_start_from=warm_start_settings,\n )\n clf.train(input_fn=train_input_fn, max_steps=2)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.692105233669281, "avg_line_length": 14.199999809265137, "blob_id": "e02885a50e2efe2dc9c7506f3233668ac976ca5e", "content_id": "b8efaf742b1a75a4f458a13d51526d302aa948d8", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "permissive", "max_line_length": 48, "num_lines": 25, "path": "/examples/image-embedding/README.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# Embedding Visualization on TensorBoard\n\n## Embed images\n\n```python\nimport tfmodel\n\ninput_exps=[\n \"examples/image-embedding/img/yasuna/*.png\",\n 
\"examples/image-embedding/img/sonya/*.png\",\n]\n\ntfmodel.util.embed(\n input_exps=input_exps,\n output_dir=\"embeddings\"\n)\n```\n\n## Result\n\n<img src=\"result.png\" width=500>\n\n## Resources\n\n* http://killmebaby.tv/special_icon.html\n" }, { "alpha_fraction": 0.5855957865715027, "alphanum_fraction": 0.6199095249176025, "avg_line_length": 44.72413635253906, "blob_id": "dbf5910e557875f4b72e13d6fda057f6f24989ad", "content_id": "87461e769f5ea76327db4b980802a7cc5d6a6c6b", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2652, "license_type": "permissive", "max_line_length": 95, "num_lines": 58, "path": "/tests/test_vgg16.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim import nets\nimport numpy as np\nfrom keras.applications.vgg16 import VGG16\n\nimport tfmodel\n\nTFMODEL_DIR = os.path.join(os.environ.get(\"HOME\"), \".tfmodel\")\nDEFAULT_VGG16_CHECKPOINT_PATH = os.path.join(TFMODEL_DIR, \"models\", \"vgg_16.ckpt\")\n\nclass TestVgg16(unittest.TestCase):\n\n def setUp(self):\n tfmodel.util.download_vgg16_checkpoint()\n\n def test_vgg16_with_keras(self):\n # Load sample image\n img = np.random.normal(size=[1, 224, 224, 3])\n # Try VGG 16 model converted for TensorFlow\n with tf.Graph().as_default() as g:\n img_ph = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])\n logits_tf = tfmodel.vgg.build_vgg16_graph(img_tensor=img_ph, include_top=True)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess=sess, save_path=DEFAULT_VGG16_CHECKPOINT_PATH)\n p_tf = sess.run(tf.nn.softmax(logits=logits_tf), feed_dict={img_ph: img})[0]\n # Try VGG 16 model included in Keras\n model = VGG16(weights=\"imagenet\", include_top=True)\n p_keras = model.predict(img[:, :, :, ::-1])[0]\n np.testing.assert_array_almost_equal(p_tf.flatten(), 
p_keras.flatten())\n\n def test_vgg16_with_tfslim(self):\n # Load sample image\n img = np.random.normal(size=[1, 224, 224, 3])\n # Try VGG 16 model converted for TensorFlow\n with tf.Graph().as_default() as g:\n img_ph = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])\n logits_tf = tfmodel.vgg.build_vgg16_graph(img_tensor=img_ph, include_top=True)\n saver = tf.train.Saver()\n tf.summary.FileWriter(logdir=\"summary/tfmodel\", graph=g)\n with tf.Session() as sess:\n saver.restore(sess=sess, save_path=DEFAULT_VGG16_CHECKPOINT_PATH)\n logits_tf = sess.run(logits_tf, feed_dict={img_ph: img})[0]\n # Try VGG 16 model included in TF-Slim\n with tf.Graph().as_default() as g:\n x_ph = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])\n net, end_points = nets.vgg.vgg_16(inputs=x_ph, num_classes=1000, is_training=False)\n saver = tf.train.Saver()\n tf.summary.FileWriter(logdir=\"summary/slim\", graph=g)\n with tf.Session() as sess:\n saver.restore(sess, DEFAULT_VGG16_CHECKPOINT_PATH)\n logits_slim = sess.run(net, feed_dict={x_ph: img})[0]\n np.testing.assert_array_almost_equal(logits_tf.flatten(), logits_slim.flatten())\n" }, { "alpha_fraction": 0.632796049118042, "alphanum_fraction": 0.6461107134819031, "avg_line_length": 31.454545974731445, "blob_id": "acce9f94e1afc456e4eef09876e1165f746d9296", "content_id": "293981f1bf67e994d708a014d8296d2c0bf04d52", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1427, "license_type": "permissive", "max_line_length": 82, "num_lines": 44, "path": "/examples/style-transfer/gce/startup.sh", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\napt-get update\napt-get -y upgrade\n\n# Install google-fluentd\ncurl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh\nsha256sum install-logging-agent.sh\nsudo bash install-logging-agent.sh\n\n# Create config file for 
google-fluentd\nFLUENTD_CONF_FILE=\"/etc/google-fluentd/config.d/python.conf\"\necho \"<source>\" > ${FLUENTD_CONF_FILE}\necho \" type tail\" >> ${FLUENTD_CONF_FILE}\necho \" format json\" >> ${FLUENTD_CONF_FILE}\necho \" path /var/log/python/*.log,/var/log/python/*.json\" >> ${FLUENTD_CONF_FILE}\necho \" read_from_head true\" >> ${FLUENTD_CONF_FILE}\necho \" tag python\" >> ${FLUENTD_CONF_FILE}\necho \"</source>\" >> ${FLUENTD_CONF_FILE}\n\n# Create log directory for Python script\nmkdir -p /var/log/python\n\n# Restart google-fluentd\nservice google-fluentd restart\n\napt-get -y install python-pip\npip install -U pip\npip install tensorflow\npip install Pillow\npip install scipy\n\ngit clone https://github.com/sfujiwara/tfmodel.git\ncd tfmodel/examples/style-transfer\n\ntensorboard --logdir=summary &\npython style_transfer.py --style img/styles/chojugiga.jpg \\\n --content img/contents/tensorflow.jpg \\\n --tv_weight 0.0 \\\n --content_weight 0.95 \\\n --style_weight=0.05 \\\n --summary_iterations 20 \\\n --iterations 3000 \\\n --learning_rate 1.0" }, { "alpha_fraction": 0.6581196784973145, "alphanum_fraction": 0.688034176826477, "avg_line_length": 35, "blob_id": "5ac4a85cd918de3e4e3a560ff4bc425d91e52f2e", "content_id": "c06e7c8bd2d1e85cf63a3909597275e7d922dcc6", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 936, "license_type": "permissive", "max_line_length": 90, "num_lines": 26, "path": "/examples/style-transfer/gce/create_instance.sh", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nINSTANCE_NAME=\"style-transfer-`date '+%Y%m%d%H%M%S'`\"\nPROJECT_ID=`gcloud config list project --format \"value(core.project)\"`\n\n# Create Compute Engine instance\ngcloud compute --project ${PROJECT_ID} instances create ${INSTANCE_NAME} \\\n --zone \"us-central1-b\" \\\n --machine-type \"n1-highcpu-16\" \\\n --network \"default\" \\\n --maintenance-policy 
\"MIGRATE\" \\\n --scopes \"https://www.googleapis.com/auth/cloud-platform\" \\\n --tags \"tensorboard-server\" \\\n --image \"ubuntu-1604-xenial-v20170330\" \\\n --image-project \"ubuntu-os-cloud\" \\\n --boot-disk-size \"200\" \\\n --boot-disk-type \"pd-standard\" \\\n --boot-disk-device-name ${INSTANCE_NAME} \\\n --metadata-from-file startup-script=startup.sh\n\n# Create firewall rule\ngcloud compute --project ${PROJECT_ID} firewall-rules create \"default-allow-tensorboard\" \\\n --allow tcp:6006 \\\n --network \"default\" \\\n --source-ranges \"0.0.0.0/0\" \\\n --target-tags \"tensorboard-server\"\n" }, { "alpha_fraction": 0.6296961307525635, "alphanum_fraction": 0.650724470615387, "avg_line_length": 43.056495666503906, "blob_id": "4acb2fdde084387114b43d5d446c8e76c9840629", "content_id": "fa7e8ebec55283ec0f893c161866398163356c84", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7799, "license_type": "permissive", "max_line_length": 120, "num_lines": 177, "path": "/examples/style-transfer/style_transfer.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport sys\nimport numpy as np\nfrom scipy.misc import imread, imresize, imsave\nimport tensorflow as tf\nfrom PIL import Image\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))\n\nimport tfmodel\n\n# Parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--content\", type=str, default=\"img/contents/tensorflow_logo.jpg\")\nparser.add_argument(\"--style\", type=str, default=\"img/styles/chouju_sumou.jpg\")\nparser.add_argument(\"--output_dir\", type=str, default=\"outputs\")\nparser.add_argument(\"--content_weight\", type=float, default=0.02)\nparser.add_argument(\"--style_weight\", type=float, default=0.98)\nparser.add_argument(\"--tv_weight\", type=float, 
default=0.0001)\nparser.add_argument(\"--iterations\", type=int, default=3000)\nparser.add_argument(\"--learning_rate\", type=float, default=1e1)\nparser.add_argument(\"--summary_iterations\", type=int, default=20)\nargs, unknown_args = parser.parse_known_args()\n\nCONTENT = args.content\nSTYLE = args.style\nOUTPUT_DIR = args.output_dir\nCONTENT_WEIGHT = args.content_weight\nSTYLE_WEIGHT = args.style_weight\nTV_WEIGHT = args.tv_weight\nLEARNING_RATE = args.learning_rate\nITERATIONS = args.iterations\nSUMMARY_ITERATIONS = args.summary_iterations\n\nCONTENT_LAYERS = [\n # \"vgg_16/conv4/conv4_2/Relu:0\",\n \"vgg_16/conv5/conv5_2/Relu:0\",\n]\nSTYLE_LAYERS = [\n \"vgg_16/conv1/conv1_1/Relu:0\",\n \"vgg_16/conv2/conv2_1/Relu:0\",\n \"vgg_16/conv3/conv3_1/Relu:0\",\n \"vgg_16/conv4/conv4_1/Relu:0\",\n \"vgg_16/conv5/conv5_1/Relu:0\",\n]\nPRE_TRAINED_MODEL_PATH = os.path.join(os.environ.get(\"HOME\", \"\"), \".tfmodel\", \"vgg16\", \"vgg_16.ckpt\")\n\n\ndef compute_target_style(style_img):\n with tf.Graph().as_default() as g1:\n width, height, _ = style_img.shape\n img_ph = tf.placeholder(tf.float32, [1, width, height, 3])\n tfmodel.vgg.build_vgg16_graph(img_tensor=tfmodel.vgg.preprocess(img_ph), include_top=False, trainable=False)\n style_layer_tensors = [g1.get_tensor_by_name(i) for i in STYLE_LAYERS]\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, PRE_TRAINED_MODEL_PATH)\n style_layers = sess.run(style_layer_tensors, feed_dict={img_ph: [style_img]})\n return style_layers\n\n\ndef compute_target_content(content_img):\n with tf.Graph().as_default() as g1:\n img_ph = tf.placeholder(tf.float32, [1, 224, 224, 3])\n _ = tfmodel.vgg.build_vgg16_graph(img_tensor=tfmodel.vgg.preprocess(img_ph), include_top=False, trainable=False)\n content_layer_tensors = [g1.get_tensor_by_name(i) for i in CONTENT_LAYERS]\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, PRE_TRAINED_MODEL_PATH)\n content_layers = 
sess.run(content_layer_tensors, feed_dict={img_ph: [content_img]})\n return content_layers\n\n\ndef build_content_loss(content_layer_tensors, target_content_layer_arrays):\n with tf.name_scope(\"content_loss\"):\n content_losses = []\n for i in range(len(target_content_layer_arrays)):\n l = tf.losses.mean_squared_error(content_layer_tensors[i], target_content_layer_arrays[i])\n content_losses.append(l)\n content_loss = tf.reduce_sum(content_losses) * tf.constant(CONTENT_WEIGHT, name=\"content_weight\")\n tf.summary.scalar(\"content_loss\", content_loss)\n return content_loss\n\n\ndef build_style_loss(style_layer_tensors, target_style_layer_arrays):\n target_style_gram_arrays = []\n for sla in target_style_layer_arrays:\n f = np.reshape(sla, (-1, sla.shape[3]))\n target_style_gram_arrays.append(np.matmul(f.T, f) / sla.size)\n\n with tf.name_scope(\"style_loss\"):\n style_loss = 0\n for i, slt in enumerate(style_layer_tensors):\n f = tf.reshape(slt, (-1, slt.get_shape()[3].value))\n style_gram = tf.matmul(tf.transpose(f), f) / slt.get_shape().num_elements()\n style_loss += tf.losses.mean_squared_error(style_gram, target_style_gram_arrays[i])\n style_loss *= tf.constant(STYLE_WEIGHT, name=\"style_weight\")\n tf.summary.scalar(\"style_loss\", style_loss)\n return style_loss\n\n\ndef build_total_variation_loss(img_tensor):\n with tf.name_scope(\"total_variation_loss\"):\n h = img_tensor.get_shape()[1].value\n w = img_tensor.get_shape()[2].value\n tv_loss = tf.reduce_mean([\n tf.nn.l2_loss(img_tensor[:, 1:, :, :] - img_tensor[:, :w-1, :, :]),\n tf.nn.l2_loss(img_tensor[:, :, 1:, :] - img_tensor[:, :, :w-1, :])\n ]) * tf.constant(TV_WEIGHT, name=\"tv_weight\")\n tf.summary.scalar(\"total_variation_loss\", tv_loss)\n return tv_loss\n\n\ntfmodel.util.download_vgg16_checkpoint(\n dest_directory=os.path.join(os.environ.get(\"HOME\", \"\"), \".tfmodel\", \"vgg16\"),\n data_url=tfmodel.vgg.MODEL_URL\n)\ncontent_img = imresize(imread(CONTENT, mode=\"RGB\"), [224, 
224]).astype(np.float32)\nstyle_img = imread(STYLE, mode=\"RGB\").astype(np.float32)\ntarget_style_layer_arrays = compute_target_style(style_img)\ntarget_content_layers = compute_target_content(content_img)\n\nwith tf.Graph().as_default() as g2:\n img_tensor = tf.Variable(tf.random_normal([1, 224, 224, 3], stddev=1., mean=128), name=\"generated_image\")\n tf.summary.image(\"generated_image\", img_tensor, max_outputs=100)\n tf.summary.image(\"content\", np.expand_dims(content_img, axis=0))\n tf.summary.image(\"style\", np.expand_dims(style_img, axis=0))\n tfmodel.vgg.build_vgg16_graph(img_tensor=tfmodel.vgg.preprocess(img_tensor), include_top=False, trainable=False)\n content_layer_tensors = [g2.get_tensor_by_name(i) for i in CONTENT_LAYERS]\n style_layer_tensors = [g2.get_tensor_by_name(i) for i in STYLE_LAYERS]\n\n # Build content loss\n content_loss = build_content_loss(\n content_layer_tensors=content_layer_tensors,\n target_content_layer_arrays=target_content_layers\n )\n # Build style loss\n style_loss = build_style_loss(\n style_layer_tensors=style_layer_tensors,\n target_style_layer_arrays=target_style_layer_arrays\n )\n # Build total variation loss\n tv_loss = build_total_variation_loss(img_tensor=img_tensor)\n # Build total loss\n with tf.name_scope(\"total_loss\"):\n total_loss = content_loss + style_loss + tv_loss\n tf.summary.scalar(\"total_loss\", total_loss)\n\n optim = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(total_loss)\n init_op = tf.global_variables_initializer()\n vgg16_saver = tf.train.Saver(tf.get_collection(tfmodel.vgg.VGG16_GRAPH_KEY))\n summary_writer = tf.summary.FileWriter(\"summary/neuralstyle\", graph=g2)\n merged = tf.summary.merge_all()\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n sess.run(init_op)\n vgg16_saver.restore(sess, PRE_TRAINED_MODEL_PATH)\n for i in range(ITERATIONS):\n if i % SUMMARY_ITERATIONS == 0:\n if not os.path.exists(OUTPUT_DIR):\n os.mkdir(OUTPUT_DIR)\n im = 
np.clip(sess.run(img_tensor)[0], 0, 255).astype(np.uint8)\n Image.fromarray(im).save(os.path.join(OUTPUT_DIR, \"output-{}.jpg\".format(i)), quality=95)\n # imsave(\n # os.path.join(OUTPUT_DIR, \"output-{}.jpg\".format(i)),\n # np.clip(sess.run(img_tensor)[0], 0, 255).astype(np.uint8)\n # )\n summary = sess.run(merged)\n summary_writer.add_summary(summary, i)\n _, t, c, s, tv = sess.run([optim, total_loss, content_loss, style_loss, tv_loss])\n print(\n \"Iter: {0} TotalLoss: {1} ContentLoss: {2} StyleLoss: {3} TotalVariationLoss: {4}\".format(\n i, t, c, s, tv\n )\n )\n\n" }, { "alpha_fraction": 0.6271130442619324, "alphanum_fraction": 0.643022894859314, "avg_line_length": 33.28409194946289, "blob_id": "043b3c7616067b6d9eb7677c6ae07e03d8fa3ed5", "content_id": "a3abd809ae2ea427259adfa23f765e5a6f77b55d", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3017, "license_type": "permissive", "max_line_length": 104, "num_lines": 88, "path": "/examples/image-classification/trainer/task.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport tensorflow as tf\nimport tfmodel\nimport csv\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--train_csv\", type=str)\nparser.add_argument(\"--test_csv\", type=str)\nparser.add_argument(\"--output_path\", type=str, default=\"outputs\")\nparser.add_argument(\"--learning_rate\", type=float, default=0.01)\nparser.add_argument(\"--batch_size\", type=int, default=2)\nparser.add_argument(\"--n_classes\", type=int, default=2)\nparser.add_argument(\"--n_epochs\", type=int, default=1)\nargs, unknown_args = parser.parse_known_args()\n\nN_CLASSES = args.n_classes\nBATCH_SIZE = args.batch_size\nTRAIN_CSV = args.train_csv\nTEST_CSV = args.test_csv\nLEARNING_RATE = args.learning_rate\nOUTPUT_PATH = args.output_path\nN_EPOCHS = args.n_epochs\n\nCHECKPOINT_DIR = os.path.join(OUTPUT_PATH, 
\"checkpoints\")\n\n\ndef build_queue(csv_file, num_epochs=None):\n with tf.name_scope(\"queue\"):\n filename_queue = tf.train.string_input_producer([csv_file], num_epochs=num_epochs)\n reader = tf.TextLineReader(skip_header_lines=1)\n key, value = reader.read(filename_queue)\n img_file_path, label = tf.decode_csv(value, record_defaults=[[\"\"], [1]])\n image = tf.image.decode_jpeg(tf.read_file(img_file_path), channels=3)\n assert image.get_shape().as_list() == [None, None, 3]\n image = tf.image.resize_images(image, [224, 224], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n assert image.get_shape().as_list() == [224, 224, 3]\n # image.set_shape([224, 224, 3])\n image = tf.cast(image, tf.float32)\n # label = tf.one_hot(label, depth=N_CLASSES)\n image_batch, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=BATCH_SIZE,\n num_threads=64,\n capacity=512,\n min_after_dequeue=0\n )\n return image_batch, label_batch\n\n\ndef get_input_fn(csv_file, n_class, n_epoch):\n\n def input_fn():\n image_batch, label_batch = build_queue(csv_file=csv_file, num_epochs=n_epoch)\n return {\"images\": image_batch}, tf.one_hot(label_batch, depth=n_class)\n\n return input_fn\n\n\ndef generate_csv(filenames, output, labels):\n image_file_paths = []\n image_labels = []\n for i, f in enumerate(filenames):\n files = tf.gfile.Glob(filename=f)\n l = [labels[i]] * len(files)\n image_file_paths.extend(files)\n image_labels.extend(l)\n result = zip(image_file_paths, image_labels)\n with tf.gfile.Open(output, mode=\"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(result)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.DEBUG)\n run_config = tf.estimator.RunConfig().replace(\n save_summary_steps=1,\n )\n clf = tfmodel.estimator.VGG16Classifier(\n fc_units=[128],\n n_classes=2,\n model_dir=\"model\",\n config=run_config\n )\n input_fn = get_input_fn(csv_file=\"img/train.csv\", n_epoch=5, n_class=2)\n clf.train(input_fn=input_fn)\n" }, { 
"alpha_fraction": 0.6768759489059448, "alphanum_fraction": 0.6952526569366455, "avg_line_length": 18.81818199157715, "blob_id": "3be188c1e8147094777170d7c4552940b44992da", "content_id": "4192eba8608b2e981cd725028fcfbe00ba79a700", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 653, "license_type": "permissive", "max_line_length": 68, "num_lines": 33, "path": "/README_OCSVM.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# One-Class Support Vector Machine\n\n## Basic Usage\n\n```python\nimport tensorflow as tf\nimport tfmodel\n\n\ndef train_input_fn():\n # Implement input pipeline for training data\n # x must be a dict to a Tensor\n # y is None since One-Class SVM is unsupervised learning \n return {\"x\": xs}, None\n\n\nfeature_columns = [tf.feature_column.numeric_column(\"x\", shape=[2])]\n\nclf = tfmodel.estimator.OneClassSVM(\n feature_columns=feature_columns,\n nu=0.1,\n rffm_input_dim=2,\n rffm_output_dim=2000,\n rffm_stddev=10.,\n optimizer=tf.train.ProximalAdagradOptimizer(1e-1),\n)\n\nclf.train(input_fn=train_input_fn)\n```\n\n## Practical Sample Code\n\nTODO" }, { "alpha_fraction": 0.5782060623168945, "alphanum_fraction": 0.6226834654808044, "avg_line_length": 28.9777774810791, "blob_id": "5ea41220d9474859b3b1e579885c118744613eb1", "content_id": "68552f0bfc9b26d6faed720a70aac04fcb0a646d", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1349, "license_type": "permissive", "max_line_length": 90, "num_lines": 45, "path": "/example_vgg16_tpu.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nimport tfmodel\n\n\ndef train_input_fn():\n x = {\"image\": tf.constant(np.ones([32, 224, 224, 3]), dtype=np.float32, name=\"image\")}\n y = tf.constant(np.zeros([32, 1000], dtype=np.float32), name=\"label\")\n ds = 
tf.data.Dataset.from_tensor_slices((x, y))\n return ds\n\n\ndef tpu_train_input_fn(params):\n x = {\"image\": tf.constant(np.ones([32, 224, 224, 3]), dtype=np.float32, name=\"image\")}\n y = tf.constant(np.zeros([32, 1000], dtype=np.float32), name=\"label\")\n ds = tf.data.Dataset.from_tensor_slices((x, y))\n ds = ds.repeat().apply(\n tf.contrib.data.batch_and_drop_remainder(32)\n )\n return ds\n\n\ndef main():\n tpu_config = tf.contrib.tpu.TPUConfig(num_shards=8)\n estimator_config = tf.contrib.tpu.RunConfig(\n master=\"\",\n save_checkpoints_steps=1000,\n save_summary_steps=1000,\n session_config=tf.ConfigProto(log_device_placement=True),\n model_dir=\"outputs\",\n tpu_config=tpu_config,\n )\n clf = tfmodel.estimator.VGG16TPUClassifier(\n fc_units=[],\n n_classes=1000,\n optimizer=tf.train.GradientDescentOptimizer(1e-2),\n model_dir=\"outputs\",\n config=estimator_config,\n train_batch_size=32\n )\n clf.train(input_fn=tpu_train_input_fn, max_steps=2)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.875, "avg_line_length": 5.833333492279053, "blob_id": "20ac5e58b414f368e468f700815beaecba17a622", "content_id": "20dfaea61cf0545625b3dac90923c10682291981", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "permissive", "max_line_length": 10, "num_lines": 6, "path": "/requirements.txt", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "pillow\nh5py\nnumpy\nscipy\ntensorflow\nkeras" }, { "alpha_fraction": 0.6416772603988647, "alphanum_fraction": 0.6467598676681519, "avg_line_length": 18.674999237060547, "blob_id": "37c0d1db1f1ace363e2e78c496b8b62427cef182", "content_id": "155f84877b0dc6dc9eabfea7c4875e2c6b4e81fe", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 787, "license_type": "permissive", "max_line_length": 70, 
"num_lines": 40, "path": "/examples/image-classification/README.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# Transfer Learning using VGG16\n\n## CSV File for Training\n\n```csv\n# image_path,label\npath/to/image/file.jpg,0\n...\n```\n\n## Training on Local\n\n```\npython -m trainer.task \\\n --train_csv=${TRAIN_CSV} \\\n --test_csv=${TEST_CSV} \\\n --output_path=outputs\n```\n\n## Training on ML Engine\n\n```\npip install -r requirements.txt -t .\n```\n\n```\nJOB_NAME=\"hoge`date '+%Y%m%d%H%M%S'`\"\nPROJECT_ID=`gcloud config list project --format \"value(core.project)\"`\nTRAIN_CSV=<path to csv file for training>\n\ngcloud ml-engine jobs submit training ${JOB_NAME} \\\n --package-path=trainer \\\n --module-name=trainer.task \\\n --staging-bucket=\"gs://${PROJECT_ID}-ml\" \\\n --region=us-central1 \\\n --config=mlengine.yaml \\\n -- \\\n --train_csv=${TRAIN_CSV} \\\n --output_path=<output directory for summary and model>\n```\n" }, { "alpha_fraction": 0.7134146094322205, "alphanum_fraction": 0.7134146094322205, "avg_line_length": 19.5, "blob_id": "9d14822dbff7bb445aaa73d40cd7f32b529e0818", "content_id": "a38e0d5757a1aad475b3a43f2e9fa68f3676584d", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "permissive", "max_line_length": 36, "num_lines": 8, "path": "/examples/image-classification/setup.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "from setuptools import find_packages\nfrom setuptools import setup\n\nsetup(\n name=\"trainer\",\n packages=[\"trainer\", \"tfmodel\"],\n include_package_data=True,\n)\n" }, { "alpha_fraction": 0.5910589694976807, "alphanum_fraction": 0.6068156957626343, "avg_line_length": 31.29585838317871, "blob_id": "10189d24c8d08bc06c295d00ee34503508e98801", "content_id": "33dbdc8ac7c2517ede46af93c32e31d9824e2fd2", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 5458, "license_type": "permissive", "max_line_length": 102, "num_lines": 169, "path": "/tfmodel/estimator/ocsvm_estimator.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n\ndef ocsvm_model_fn(features, labels, mode, params, config):\n assert isinstance(features, dict)\n feature_columns = params[\"feature_columns\"]\n kernel_mapper = tf.contrib.kernel_methods.RandomFourierFeatureMapper(\n input_dim=params[\"rffm_input_dim\"],\n output_dim=params[\"rffm_output_dim\"],\n stddev=params[\"rffm_stddev\"],\n name=\"rffm\"\n )\n with tf.name_scope(\"feature_mapping\"):\n # mapped_features = kernel_mapper.map(features[\"x\"])\n mapped_features = kernel_mapper.map(tf.feature_column.input_layer(features, feature_columns))\n\n weight = tf.Variable(\n tf.truncated_normal([params[\"rffm_output_dim\"], 1]),\n name=\"weight\", dtype=tf.float32, trainable=True,\n )\n rho = tf.Variable(0, name=\"rho\", dtype=tf.float32, trainable=True)\n tf.summary.scalar(name=\"rho\", tensor=rho)\n tf.summary.histogram(name=\"weight\", values=weight)\n\n y = tf.matmul(mapped_features, weight)\n decision_value = y - rho\n\n with tf.name_scope(\"regularizer\"):\n regularizer = tf.nn.l2_loss(weight)\n with tf.name_scope(\"hinge_loss\"):\n hinge_loss = tf.reduce_mean(tf.maximum(0., tf.add(rho, -y)))\n loss = hinge_loss + regularizer - tf.multiply(rho, params[\"nu\"])\n\n tf.summary.scalar(name=\"regularizer\", tensor=regularizer)\n tf.summary.histogram(name=\"decision_values\", values=y)\n tf.summary.scalar(name=\"hinge_loss\", tensor=hinge_loss)\n\n train_op = params[\"optimizer\"].minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n\n # Set eval metric\n if mode == tf.estimator.ModeKeys.EVAL:\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(labels=labels, predictions=tf.sign(decision_value)),\n \"recall\": tf.metrics.recall(labels=labels, predictions=tf.sign(decision_value)),\n 
\"precision\": tf.metrics.precision(labels=labels, predictions=tf.sign(decision_value))\n }\n else:\n eval_metric_ops = None\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=decision_value,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n # scaffold=scaffold,\n # export_outputs={\"prediction\": export_outputs},\n )\n return estimator_spec\n\n\nclass OneClassSVM(tf.estimator.Estimator):\n\n def __init__(\n self,\n feature_columns,\n nu,\n rffm_input_dim,\n rffm_output_dim,\n rffm_stddev,\n optimizer=tf.train.ProximalAdagradOptimizer(1e-2),\n model_dir=None,\n config=None\n ):\n params = {\n \"feature_columns\": feature_columns,\n \"nu\": nu,\n \"rffm_input_dim\": rffm_input_dim,\n \"rffm_output_dim\": rffm_output_dim,\n \"rffm_stddev\": rffm_stddev,\n \"optimizer\": optimizer,\n }\n super(OneClassSVM, self).__init__(\n model_fn=ocsvm_model_fn,\n model_dir=model_dir,\n params=params,\n config=config\n )\n\n\nif __name__ == \"__main__\":\n import numpy as np\n import matplotlib.pyplot as plt\n import json\n import os\n\n tf_conf = {\n \"cluster\": {\"master\": [\"localhost:2222\"]},\n \"task\": {\"index\": 0, \"type\": \"master\"}\n }\n os.environ[\"TF_CONFIG\"] = json.dumps(tf_conf)\n\n tf.logging.set_verbosity(tf.logging.DEBUG)\n x_train = np.random.multivariate_normal(mean=[1., 1.], cov=np.eye(2), size=100).astype(np.float32)\n x_eval = np.vstack([\n np.random.multivariate_normal(mean=[1., 1.], cov=np.eye(2), size=950).astype(np.float32),\n np.random.multivariate_normal(mean=[10., 10.], cov=np.eye(2), size=50).astype(np.float32)\n ])\n y_eval = np.array([1.]*950 + [-1.]*50).astype(np.float32)\n\n feature_columns = [tf.feature_column.numeric_column(\"x\", shape=[2])]\n\n config = tf.estimator.RunConfig(\n save_summary_steps=100,\n save_checkpoints_steps=2000,\n model_dir=\"outputs\"\n )\n\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": x_train},\n y=None,\n shuffle=True,\n batch_size=32,\n 
num_epochs=None\n )\n\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": x_eval},\n y=y_eval,\n shuffle=False,\n batch_size=32,\n num_epochs=1\n )\n\n clf = OneClassSVM(\n feature_columns=feature_columns,\n nu=0.1,\n rffm_input_dim=2,\n rffm_output_dim=2000,\n rffm_stddev=10.,\n optimizer=tf.train.ProximalAdagradOptimizer(1e-1),\n config=config\n )\n\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=10000)\n eval_spec = tf.estimator.EvalSpec(\n input_fn=eval_input_fn, start_delay_secs=0, throttle_secs=1, exporters=None\n )\n tf.estimator.train_and_evaluate(estimator=clf, train_spec=train_spec, eval_spec=eval_spec)\n\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": x_eval},\n y=y_eval,\n shuffle=False,\n num_epochs=1\n )\n result = np.array(list(clf.predict(predict_input_fn))).flatten()\n print(result)\n\n threshold = 0.\n ind_normal = result > threshold\n ind_outlier = result < threshold\n plt.plot(x_eval[ind_normal, 0], x_eval[ind_normal, 1], \"x\", label=\"Predicted as normal\")\n plt.plot(x_eval[ind_outlier, 0], x_eval[ind_outlier, 1], \"x\", label=\"Predicted as outlier\")\n plt.legend()\n plt.grid()\n plt.show()\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 44, "blob_id": "1c1f5992e9077e83b43dff60d2e4d04ed8f0fa8f", "content_id": "43a140aafaea68effe0a567243bfe5e88e49500f", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 45, "license_type": "permissive", "max_line_length": 44, "num_lines": 1, "path": "/examples/image-classification/requirements.txt", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "git+https://github.com/sfujiwara/tfmodel.git\n" }, { "alpha_fraction": 0.6192893385887146, "alphanum_fraction": 0.6218274235725403, "avg_line_length": 23.625, "blob_id": "2326e76ffc0521ada780764f19272277e8726d59", "content_id": 
"2bf3b1dad1be0eb9ccd96c7e3ec3904bed61a968", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "permissive", "max_line_length": 56, "num_lines": 16, "path": "/setup.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nfrom tfmodel import __license__, __author__, __version__\n\nsetup(\n name=\"tfmodel\",\n description=\"\",\n version=__version__,\n license=__license__,\n author=__author__,\n author_email=\"[email protected]\",\n url=\"https://github.com/sfujiwara/tfmodel\",\n packages=find_packages(),\n # install_requires=[\"tensorflow\"],\n)\n" }, { "alpha_fraction": 0.544971227645874, "alphanum_fraction": 0.598803699016571, "avg_line_length": 43.25490188598633, "blob_id": "7f5c5841be50df1e10f4f555c041cbeb2078fb6d", "content_id": "f317380db7722146aebf2792c921b3458b7d5309", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4514, "license_type": "permissive", "max_line_length": 101, "num_lines": 102, "path": "/tfmodel/resnet.py", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "import typing\nimport tensorflow as tf\n\n\nRESNET50_GRAPH_KEY = 'resnet50'\n\n\ndef resnet_conv2d(inputs, filters, kernel_size, strides, trainable, activation=tf.nn.relu):\n n_kernels = inputs.get_shape()[3].value\n w = tf.get_variable(\n name='weights',\n shape=[kernel_size, kernel_size, n_kernels, filters],\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(stddev=0.01),\n trainable=trainable,\n collections=[RESNET50_GRAPH_KEY],\n )\n b = tf.get_variable(\n name='biases',\n shape=[filters],\n dtype=tf.float32,\n initializer=tf.zeros_initializer(),\n trainable=trainable,\n collections=[RESNET50_GRAPH_KEY],\n )\n h = tf.nn.conv2d(inputs, w, strides=[1, strides, strides, 1], padding='SAME')\n h = 
tf.nn.bias_add(h, b)\n if activation is not None:\n h = activation(h)\n return h\n\n\ndef resnet_block(inputs, filters, trainable):\n # type: (tf.Tensor, typing.List, bool) -> tf.Tensor\n\n with tf.variable_scope(\"conv1\"):\n h = resnet_conv2d(inputs, filters=filters[0], kernel_size=1, strides=1, trainable=trainable)\n with tf.variable_scope(\"conv2\"):\n h = resnet_conv2d(h, filters=filters[1], kernel_size=3, strides=1, trainable=trainable)\n with tf.variable_scope(\"conv3\"):\n h = resnet_conv2d(h, filters=filters[2], kernel_size=1, strides=1, trainable=trainable)\n return h\n\n\ndef resnet50_feature(inputs, trainable=True):\n\n with tf.variable_scope(\"resnet_50_v1\"):\n # First convolution layer\n with tf.variable_scope(\"conv1\"):\n h = resnet_conv2d(inputs, filters=64, kernel_size=7, strides=2, trainable=trainable)\n # First pooling layer\n h = tf.nn.max_pool(h, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')\n with tf.variable_scope(\"block1\"):\n with tf.variable_scope(\"unit_1\"):\n h = resnet_block(h, filters=[64, 64, 256], trainable=trainable)\n with tf.variable_scope(\"unit_2\"):\n h = resnet_block(h, filters=[64, 64, 256], trainable=trainable)\n with tf.variable_scope(\"unit_3\"):\n h = resnet_block(h, filters=[64, 64, 256], trainable=trainable)\n with tf.variable_scope(\"block2\"):\n with tf.variable_scope(\"unit_1\"):\n h = resnet_block(h, filters=[128, 128, 512], trainable=trainable)\n with tf.variable_scope(\"unit_2\"):\n h = resnet_block(h, filters=[128, 128, 512], trainable=trainable)\n with tf.variable_scope(\"unit_3\"):\n h = resnet_block(h, filters=[128, 128, 512], trainable=trainable)\n with tf.variable_scope(\"unit_4\"):\n h = resnet_block(h, filters=[128, 128, 512], trainable=trainable)\n with tf.variable_scope(\"block3\"):\n with tf.variable_scope(\"unit_1\"):\n h = resnet_block(h, filters=[256, 256, 1024], trainable=trainable)\n with tf.variable_scope(\"unit_2\"):\n h = resnet_block(h, filters=[256, 256, 1024], 
trainable=trainable)\n with tf.variable_scope(\"unit_3\"):\n h = resnet_block(h, filters=[256, 256, 1024], trainable=trainable)\n with tf.variable_scope(\"unit_4\"):\n h = resnet_block(h, filters=[256, 256, 1024], trainable=trainable)\n with tf.variable_scope(\"unit_5\"):\n h = resnet_block(h, filters=[256, 256, 1024], trainable=trainable)\n with tf.variable_scope(\"unit_6\"):\n h = resnet_block(h, filters=[256, 256, 1024], trainable=trainable)\n with tf.variable_scope(\"block4\"):\n with tf.variable_scope(\"unit_1\"):\n h = resnet_block(h, filters=[512, 512, 2048], trainable=trainable)\n with tf.variable_scope(\"unit_2\"):\n h = resnet_block(h, filters=[512, 512, 2048], trainable=trainable)\n with tf.variable_scope(\"unit_3\"):\n h = resnet_block(h, filters=[512, 512, 2048], trainable=trainable)\n return h\n\n\nif __name__ == '__main__':\n x = tf.placeholder(dtype=tf.float32, shape=[32, 224, 224, 3])\n result = resnet50_feature(x)\n print(result)\n # import tensorflow.contrib.slim.nets as nets\n # result, _ = nets.resnet_v1.resnet_v1_50(x)\n # print(result)\n # import tensorflow_hub as hub\n # m = hub.Module(\"https://tfhub.dev/google/imagenet/resnet_v2_50/classification/1\")\n # result = m(x)\n tf.summary.FileWriter('outputs', graph=tf.get_default_graph())\n" }, { "alpha_fraction": 0.748031497001648, "alphanum_fraction": 0.7539370059967041, "avg_line_length": 25.736841201782227, "blob_id": "e6d36552a00d76f2ad7a82bf006e06afba490038", "content_id": "99746f4f46b8dd3e5980c3c44fc6e6a928b8dfee", "detected_licenses": [ "CC-BY-4.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1016, "license_type": "permissive", "max_line_length": 205, "num_lines": 38, "path": "/README.md", "repo_name": "sfujiwara/tfmodel", "src_encoding": "UTF-8", "text": "# tfmodel\n\n[![CircleCI](https://circleci.com/gh/sfujiwara/tfmodel.svg?style=svg)](https://circleci.com/gh/sfujiwara/tfmodel)\n[![MIT 
License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE)\n\nThis module includes pre-trained models converted for [TensorFlow](https://www.tensorflow.org/) and various [Canned Estimators](https://www.tensorflow.org/programmers_guide/estimators#pre-made_estimators).\n\n## Requirements\n\ntfmodel requires nothing but TensorFlow.\nOther libraries in [requirement.txt](requirements.txt) are required only for unit tests or examples.\n\n## Installation\n\n```\npip install git+https://github.com/sfujiwara/tfmodel\n```\n\n## Canned Estimators\n\n### VGG 16 Classifier\n\nSee [README_VGG16.md](README_VGG16.md).\n\n### One-Class SVM\n\nSee [README_OCSVM.md](README_OCSVM.md).\n\n## Unit Test\n\n```\npython -m unittest discover -v tests\n```\n\n## License\n\nThis module itself is released under MIT license.\n**Note that weights of existing pre-trained models follow their licenses respectively**.\n" } ]
23
sirrrik/content
https://github.com/sirrrik/content
686fc31d0b816c7631bd835dcad6b19b7a6cd6ed
514047f7a5a584794b45aaf6b90300f854ff5fba
1f14b15ac941543f683a494e97867ac44d9f0883
refs/heads/master
2023-06-19T16:06:28.575523
2021-07-18T06:50:44
2021-07-18T06:50:44
387,110,535
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7753222584724426, "alphanum_fraction": 0.7753222584724426, "avg_line_length": 30.941177368164062, "blob_id": "b74c81971fc4da6a573f7410c70d491a93e280d3", "content_id": "635c4c3e97e3677223ba41232ab57260f5d308c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 65, "num_lines": 17, "path": "/content_feed/content_feed/urls.py", "repo_name": "sirrrik/content", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom django.urls.conf import include\nfrom rest_framework import routers, views\nfrom content_api.views import ItemViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'Item', ItemViewSet, basename='Item')\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^api/', include(router.urls)),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 31.14285659790039, "blob_id": "c7f57a84cd390c4fc13042b59d418d4b5139921f", "content_id": "3ca1953bf740ebd01f767ef815f75e094f4df275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 53, "num_lines": 7, "path": "/content_feed/content_api/serializers.py", "repo_name": "sirrrik/content", "src_encoding": "UTF-8", "text": "from rest_framework import fields, serializers\nfrom content_api.models import Item\n\nclass ItemSerializer(serializers.ModelSerializer):\n class Meta:\n model= Item\n fields = 'id', 'title', 'description','image'\n" }, { "alpha_fraction": 0.8108808398246765, "alphanum_fraction": 0.8108808398246765, "avg_line_length": 28.769229888916016, "blob_id": 
"2f338cfebc793714fe394a05920bc413bccdcc8d", "content_id": "6a0cea4b798e1ad143b786bcfcc5bede25c5d405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 50, "num_lines": 13, "path": "/content_feed/content_api/views.py", "repo_name": "sirrrik/content", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom content_api.models import Item\nfrom content_api.serializers import ItemSerializer\n\n\n# Create your views here.\n\nclass ItemViewSet(viewsets.ModelViewSet):\n queryset = Item.objects.all()\n serializer_class = ItemSerializer" } ]
3
sarunyou/OpenCVProject
https://github.com/sarunyou/OpenCVProject
ed68653fea4d699fd67c1895bd7fe7192b3c1a87
69b0c19cc2391c7219a584568fcb80d92915fb76
4b4efc6b8a604191e6211fc4041781e079d95af8
refs/heads/master
2021-01-22T19:21:54.618229
2017-04-21T09:31:39
2017-04-21T09:31:39
85,191,686
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5916110277175903, "alphanum_fraction": 0.6243739724159241, "avg_line_length": 34.22793960571289, "blob_id": "22946c9eea8e3874e9bb4a27a0d9f7471ee167d9", "content_id": "105164db75cc3e0164800e0893276dcfcd017790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4792, "license_type": "no_license", "max_line_length": 155, "num_lines": 136, "path": "/blendImage.py", "repo_name": "sarunyou/OpenCVProject", "src_encoding": "UTF-8", "text": "import glob\nimport cv2\nimport numpy as np\nimport os\nos.system('rm warpedImg*')\n\ndef getTranslationImage(templat, image, numberImage):\n # params for ShiTomasi corner detection\n feature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n # Parameters for lucas kanade optical flow\n lk_params = dict( winSize = (15,15), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) # Create some random colors\n color = np.random.randint(0,255,(100,3))\n p0 = cv2.goodFeaturesToTrack(templat, mask = None, **feature_params)\n\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(templat, image, p0, None, **lk_params)\n # Select good points\n good_new = p1[st==1]\n good_old = p0[st==1]\n # draw the tracks\n xSum = 0\n ySum = 0\n xArray = []\n yArray = []\n countRound = 0\n for i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n xArray.append(c-a)\n yArray.append(d-b)\n countRound += 1\n # frame = cv2.circle(image,(a,b),5,color[i].tolist(),-1)\n xArray.remove(max(xArray))\n xArray.remove(min(xArray))\n yArray.remove(max(yArray))\n yArray.remove(min(yArray))\n xAvg = sum(xArray) / len(xArray)\n yAvg = sum(yArray) / len(yArray)\n rows, cols = image.shape\n print 'xAvg', xAvg\n print 'yAvg', yAvg\n # transition frame by x equal xAvg, y equal yAvg\n M = np.float32([[1, 0, xAvg], [0, 1, yAvg]])\n dst = cv2.warpAffine(image, M, (cols, rows))\n # 
cv2.imwrite('imageCalled%d.jpg' % numberImage, image)\n cv2.imwrite('translationImg%d.jpg' % numberImage, dst)\n print 'getTranslationImage'\n return dst\n\ndef create_blank(width, height, rgb_color=(0, 0, 0)):\n \"\"\"Create new image(numpy array) filled with certain color in RGB\"\"\"\n # Create black blank image\n image = np.zeros((height, width, 3), np.uint8)\n\n # Since OpenCV uses BGR, convert the color first\n color = tuple(reversed(rgb_color))\n # Fill image with color\n image[:] = color\n\n return image\n\n\n\npaths = glob.glob('./last_img*')\nwhite = (255, 255, 255)\n# test\n# tempImage = cv2.imread(paths[0], 1);\n# print 1.0/len(paths)\n# blankImage = cv2.addWeighted(blankImage, 1.0 - 1.0/len(paths), tempImage, 1.0/len(paths), 0)\n\nMIN_MATCH_COUNT = 10\n\n# temporary comment\nresultImg = cv2.imread(paths[0], 0)\ntemplateSelectedImg = cv2.imread(paths[0], 0)\n\nwidth, height = templateSelectedImg.shape\nblankImage = create_blank(width, height, rgb_color=white)\nlenPaths = len(paths)\ncountUseableImg = 0\nsift = cv2.xfeatures2d.SIFT_create()\nfor path in paths[1:]:\n \n # Initiate SIFT detector\n cropImg = cv2.imread(path, 0)\n h, w = cropImg.shape\n # cv2.imwrite('test%d.jpg' % paths.index(path), cropImg)\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(templateSelectedImg,None)\n kp2, des2 = sift.detectAndCompute(cropImg,None)\n\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n search_params = dict(checks = 50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n\n try:\n matches = flann.knnMatch(des1,des2,k=2)\n\n # store all the good matches as per Lowe's ratio test.\n good = []\n for m,n in matches:\n if m.distance < 0.7*n.distance:\n good.append(m)\n if len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n M, mask = 
cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,M)\n Mperspective = cv2.getPerspectiveTransform(dst, pts)\n warpedImg = cv2.warpPerspective(cropImg,Mperspective,(w,h))\n fileName = path[path.index('\\\\') +1 :path.rfind('.')]\n print 'fileName is',fileName\n cv2.imwrite('warpedImgfrom%s.jpg' % (fileName), warpedImg)\n print 'countUseableImg', countUseableImg\n # cropImg = getTranslationImage(templateSelectedImg, cropImg, countUseableImg)\n countUseableImg += 1\n alpha = 1.0 / countUseableImg\n # resultImg = cv2.addWeighted(resultImg, 1 - alpha, cropImg, alpha, 0)\n resultImg = cv2.addWeighted(resultImg, 1 - alpha, warpedImg, alpha, 0)\n else:\n pass\n except:\n pass\n\n \ncv2.imshow('resultImg', resultImg)\ncv2.imwrite('resultDontUseTransition.jpg', resultImg)\ncv2.waitKey(0)\ncv2.destoryAllWindows()\n\n" }, { "alpha_fraction": 0.6300868988037109, "alphanum_fraction": 0.6652949452400208, "avg_line_length": 32.661537170410156, "blob_id": "bad645fdbeb65527a8c3581a75f3ae43cec7956b", "content_id": "3bc521ecd76c131ef98a6c5544076587870a251a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 151, "num_lines": 65, "path": "/lk_track_simple.py", "repo_name": "sarunyou/OpenCVProject", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n# params for ShiTomasi corner detection\nfeature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n# Parameters for lucas kanade optical flow\nlk_params = dict( winSize = (15,15), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) # Create some random colors\ncolor = np.random.randint(0,255,(100,3))\n# Take first frame and find corners in it\nold_gray = cv2.imread('./last_img0.jpg', 0) \ncv2.imshow('template', old_gray)\np0 = 
cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n# Create a mask image for drawing purposes\nmask = np.zeros_like(old_gray)\n\nframe_gray = translation_frame = cv2.imread('./last_img99.jpg', 0)\n# calculate optical flow\np1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n# Select good points\ngood_new = p1[st==1]\ngood_old = p0[st==1]\n# draw the tracks\nxSum = 0\nySum = 0\nxArray = []\nyArray = []\ncountRound = 0\n\nfor i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n xArray.append(c-a)\n yArray.append(d-b)\n countRound += 1\n mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame_gray,(a,b),5,color[i].tolist(),-1)\nprint 'max xArray' , max(xArray)\nprint 'max yArray' , max(yArray)\n# xArray.remove(max(xArray))\n# xArray.remove(min(xArray))\n# yArray.remove(max(yArray))\n# yArray.remove(min(yArray))\nxAvg = sum(xArray) / len(xArray)\nyAvg = sum(yArray) / len(yArray)\nprint 'xArray', [x for x in xArray]\nprint 'yArray', [x for x in yArray]\nprint 'xAvg', xAvg\nprint 'yAvg', yAvg\nrows, cols = frame.shape\nprint 'frame.shape', frame.shape\n# transition frame by x equal xAvg, y equal yAvg\nM = np.float32([[1, 0, xAvg], [0, 1, yAvg]])\ndst = cv2.warpAffine(translation_frame, M, (cols, rows))\ncv2.imshow('dst', dst)\n\nimg = cv2.add(frame,mask)\n# img = cv2.addWeighted(old_gray, 0.3, frame_gray, 0.7, 0)\ncv2.imshow('frame',img)\n# Now update the previous frame and previous points\nold_gray = frame_gray.copy()\np0 = good_new.reshape(-1,1,2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()" } ]
2
aeternocap/parsedcmd
https://github.com/aeternocap/parsedcmd
1943943530db7621af9c71a74148c9d350ffb781
f2b2b5f3d44b21dae6414b0ac90b61e6b4af2e0d
42878427f1b033b658ac63b5994ea07cda83bfec
refs/heads/master
2021-01-18T10:59:14.532666
2012-09-13T00:38:04
2012-09-13T00:57:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5967742204666138, "alphanum_fraction": 0.6290322542190552, "avg_line_length": 16.714284896850586, "blob_id": "5307ab379313a44f3d6b85e3490e60980554f851", "content_id": "c14f88796569e4f99c488bbab262156b57661d87", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "permissive", "max_line_length": 28, "num_lines": 7, "path": "/test/test_all.py", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "import sys\n\nsys.path.append(\".\")\nif sys.version_info[0] == 2:\n from _test_py2 import *\nelse:\n from _test_py3 import *\n" }, { "alpha_fraction": 0.6325391530990601, "alphanum_fraction": 0.6357587575912476, "avg_line_length": 39.8684196472168, "blob_id": "dc5c7ba9ee40967c828eb9b423bbcd4fded41644", "content_id": "2e9929e94aac6a47c3b25ccb1c928669db67116c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4659, "license_type": "permissive", "max_line_length": 79, "num_lines": 114, "path": "/README.md", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "ParsedCmd - A Cmd with argument list parsing\n============================================\n\nParsedCmd is an extension built around the excellent cmd module of the standard\nlibrary. Cmd allows one to build simple custom shells using `do_*` methods,\ntaking care in particular of the REPL loop and the interactive help. However,\nno facility is given for parsing the argument line (`do_*` methods are passed\nthe rest of the line as a single string argument).\n\nWith ParsedCmd, `do_*` methods can be type-annotated, either using Python\n3's function annotation syntax, or with the ad-hoc `annotate` decorator,\nallowing the dispatcher to parse the argument list for them. 
Arguments can\nalso be marked as keyword-only, either using Python 3's dedicated syntax, or\nwith the ad-hoc `kw_only` decorator, in which case they will be assigned only\nif given as explicit arguments, i.e. `method -option opt` translates into\n`do_method(option=opt)` if `option` is keyword-only.\n\nThese annotations can also used to enhance the output of the default `do_help`\nmethod, by setting the `show_usage` attribute of the ParsedCmd object to True.\n\nExample (Python 2.6-2.7)\n========================\n\n from parsedcmd import *\n\n class UI(ParsedCmd):\n # Non-annotated arguments default to str.\n # boolean is a utility function, that casts every string to True,\n # except \"f\", \"false\", \"off\" and \"0\" (case-insensitive).\n @annotate(flag=boolean, repeat=int)\n @kw_only(\"flag\", \"repeat\")\n def do_print(self, line=\"abc\", flag=True, repeat=1):\n \"\"\"Print a given string (defaults to \"abc\").\n Print nothing if -flag is set to false.\n Print multiple copies if -repeat N option is given.\n \"\"\"\n if flag:\n for i in range(repeat):\n print(line, file=self.stdout)\n\n # *args can also be annotated.\n # Python 2's usual limitations about mixing keyword arguments and *args\n # applies.\n @annotate(mul=int, nums=int)\n def do_multiply(self, mul, *nums):\n \"\"\"Print `mul` times the numbers given.\n \"\"\"\n for num in nums:\n print(mul * num, file=self.stdout)\n\n # Do not parse the argument line for do_shell.\n @gets_raw\n def do_shell(self, line):\n \"\"\"Evaluates the given line.\n \"\"\"\n eval(line)\n\nExample (Python 3)\n==================\n\n from parsedcmd import *\n\n class UI(ParsedCmd):\n def do_print(self, line=\"abc\", *, flag: boolean=True, repeat: int=1):\n \"\"\"Print a given string (defaults to \"abc\").\n Print nothing if -flag is set to false.\n Print multiple copies if -repeat N option is given.\n \"\"\"\n if flag:\n for i in range(repeat):\n print(line, file=self.stdout)\n\n def do_multiply(self, mul: int, *nums: int):\n 
\"\"\"Print `mul` times the numbers given.\n \"\"\"\n for num in nums:\n print(mul * num, file=self.stdout)\n\n @gets_raw\n def do_shell(self, line):\n \"\"\"Evaluates the given line.\n \"\"\"\n eval(line)\n\nRemarks\n=======\n\nThe parsing is done in the following steps:\n - the input line is passed to the `split()` method (by default\n `shlex.split()`), and the result is bound to the argument list of the\n `do_*` method.\n - initial options (`-opt val`) are assigned to keyword-only arguments (which\n can be simulated in Python 2 using the `@kw_only` decorator).\n - each value bound to an argument annotated with a callable, either through\n `@annotate([arg=callable]*)`, or through Python 3's function annotation\n syntax (`f(arg[=default]: callable)`), is passed to it; however, this does\n not affect default values),\n - if `do_*` has an annotated `*args` argument, then each element\n of args / each value in kwargs is casted.\n - in theory, `**kwargs` are also parsed and cast but there is currently\n effectively no way to assign to them.\n\nParsedCmd interacts imperfectly with decorated functions. Currently, it\nfollows the `__wrapped__` attribute until finding a function that either\ndoesn't have this attribute or is decorated with `@use_my_annotations`, uses\nthe signature and the annotations of this function to create the argument\nlist, which is then passed to the wrapper function. 
In particular, ParsedCmd\nprovides a `wraps` function that works like the one provided in functools, but\nalso sets the `__wrapped__` attribute (as in Python 3.3 or higher).\n\nTesting\n=======\n\nJust run `py.test` in the source folder.\n" }, { "alpha_fraction": 0.6342182755470276, "alphanum_fraction": 0.6430678367614746, "avg_line_length": 25.076923370361328, "blob_id": "9bb55fe0a70555e4d26dc7b84ea3d9c7ddca0749", "content_id": "d52f76a5b41368ea6d8a62d403a398f112edf2e8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "permissive", "max_line_length": 52, "num_lines": 13, "path": "/setup.py", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nsetup(\n name='parsedcmd',\n version='0.1.1',\n author='Antony Lee',\n author_email='[email protected]',\n py_modules=['parsedcmd'],\n url='http://github.com/anntzer/parsedcmd',\n license='LICENSE.txt',\n description='A cmd with argument list parsing.',\n long_description=open('README.md').read(),\n)\n" }, { "alpha_fraction": 0.5530441403388977, "alphanum_fraction": 0.556932806968689, "avg_line_length": 41.527130126953125, "blob_id": "5354e97ba4146a405750c26d63f383625b08430c", "content_id": "e63fd05aec533dede12ffdb8614946371f6d35aa", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16458, "license_type": "permissive", "max_line_length": 81, "num_lines": 387, "path": "/parsedcmd.py", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "\"\"\"ParsedCmd - A Cmd with argument list parsing\n\nInterpreters constructed with this class obey the same conventions as those\nconstructed with cmd.Cmd, except that `do_*' methods are passed parsed argument\nlists, instead of the raw input, as long as the method has not been decorated\nwith `@gets_raw`.\n\nThe parsing is done in the following steps:\n - 
the input line is passed to the `split()' method (by default\n `shlex.split()'), and the result is bound to the argument list of the\n `do_*' method.\n - initial options (`-opt val') are assigned to keyword-only arguments (which\n can be simulated in Python 2 using the `@kw_only' decorator).\n - each value bound to an argument annotated with a callable, either through\n `@annotate([arg=callable]*)', or through Python 3's function annotation\n syntax (`f(arg[=default]: callable)'), is passed to it; however, this does\n not affect default values),\n - if `do_*' has an annotated `*args' argument, then each element\n of args / each value in kwargs is casted.\n - in theory, `**kwargs' are also parsed and cast but there is currently\n effectively no way to assign to them.\n\nParsedCmd interacts imperfectly with decorated functions. Currently, it\nfollows the `__wrapped__` attribute until finding a function that either\ndoesn't have this attribute or is decorated with `@use_my_annotations`, uses\nthe signature and the annotations of this function to create the argument list,\nwhich is then passed to the wrapper function. 
In particular, ParsedCmd\nprovides a `wraps` function that works like the one provided in functools, but\nalso sets the `__wrapped__` attribute (as in Python 3.3 or higher).\n\nIf the `show_usage` attribute of a ParsedCmd is set to true (which can be done\nby passing it as a keyword argument to the constructor), then help messages\nthat are derived from docstrings (not those that are derived from a `help_*`\nmethod) are appended with a pretty-printed version of the method's signature,\nif the methods is not `@gets_raw`ed.\n\"\"\"\n\nfrom __future__ import print_function\nfrom collections import namedtuple\nfrom cmd import Cmd\nimport functools\nimport inspect\nimport itertools\nimport shlex\nimport sys\nimport textwrap\n\n__all__ = [\"gets_raw\", \"use_my_annotations\", \"ParsedCmd\", \"boolean\"]\n\nif sys.version_info[0] >= 3:\n getfullargspec = inspect.getfullargspec\n getcallargs = inspect.getcallargs\n\n basestring = str\n\nelse:\n if sys.version_info[0] < 2 or sys.version_info[1] < 6:\n raise Exception(\"Cmd2 requires Python >= 2.6.\")\n __all__.extend([\"wraps\", \"annotate\", \"kw_only\"])\n\n def getfullargspec(func):\n \"\"\"Imitate Python 3's inspect.getfullargspec\"\"\"\n args_, varargs, varkw, defaults_ = inspect.getargspec(func)\n kwonlydefaults = getattr(func, \"kw_only\", None)\n kwonlyargs = kwonlydefaults.keys() if kwonlydefaults else []\n # remove kw-only args *and* corresponding defaults\n if defaults_:\n args = args_[:-len(defaults_)]\n defaults = []\n for arg, default in zip(args_[-len(defaults_):], defaults_):\n if arg not in kwonlyargs:\n args.append(arg)\n defaults.append(default)\n else: # avoiding args_[:-0]\n args = args_\n defaults = defaults_\n annotations = getattr(func, \"func_annotations\", {})\n FullArgSpec = namedtuple(\"FullArgSpec\",\n (\"args\", \"varargs\", \"varkw\", \"defaults\",\n \"kwonlyargs\", \"kwonlydefaults\", \"annotations\"))\n return FullArgSpec(args, varargs, varkw, defaults,\n kwonlyargs, kwonlydefaults, 
annotations)\n\n # modified from Python 3's inspect module to handle kwonly arguments.\n def getcallargs(func, *positional, **named):\n \"\"\"Get the mapping of arguments to values.\n\n A dict is returned, with keys the function argument names (including\n the names of the * and ** arguments, if any), and values the respective\n bound values from 'positional' and 'named'.\"\"\"\n spec = getfullargspec(func)\n args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec\n f_name = func.__name__\n arg2value = {}\n\n if inspect.ismethod(func) and func.__self__ is not None:\n # implicit 'self' (or 'cls' for classmethods) argument\n positional = (func.__self__,) + positional\n num_pos = len(positional)\n num_total = num_pos + len(named)\n num_args = len(args)\n num_defaults = len(defaults) if defaults else 0\n for arg, value in zip(args, positional):\n arg2value[arg] = value\n if varargs:\n if num_pos > num_args:\n arg2value[varargs] = positional[-(num_pos-num_args):]\n else:\n arg2value[varargs] = ()\n elif 0 < num_args < num_pos:\n raise TypeError('%s() takes %s %d positional %s (%d given)' % (\n f_name, 'at most' if defaults else 'exactly', num_args,\n 'arguments' if num_args > 1 else 'argument', num_total))\n elif num_args == 0 and num_total:\n if varkw or kwonlyargs:\n if num_pos:\n # XXX: We should use num_pos, but Python also uses num_total:\n raise TypeError('%s() takes exactly 0 positional arguments '\n '(%d given)' % (f_name, num_total))\n else:\n raise TypeError('%s() takes no arguments (%d given)' %\n (f_name, num_total))\n\n for arg in itertools.chain(args, kwonlyargs):\n if arg in named:\n if arg in arg2value:\n raise TypeError(\"%s() got multiple values for keyword \"\n \"argument '%s'\" % (f_name, arg))\n else:\n arg2value[arg] = named.pop(arg)\n for kwonlyarg in kwonlyargs:\n if kwonlyarg not in arg2value:\n try:\n arg2value[kwonlyarg] = kwonlydefaults[kwonlyarg]\n except KeyError:\n raise TypeError(\"%s() needs keyword-only argument %s\" %\n 
(f_name, kwonlyarg))\n if defaults: # fill in any missing values with the defaults\n for arg, value in zip(args[-num_defaults:], defaults):\n if arg not in arg2value:\n arg2value[arg] = value\n if varkw:\n arg2value[varkw] = named\n elif named:\n unexpected = next(iter(named))\n raise TypeError(\"%s() got an unexpected keyword argument '%s'\" %\n (f_name, unexpected))\n unassigned = num_args - len([arg for arg in args if arg in arg2value])\n if unassigned:\n num_required = num_args - num_defaults\n raise TypeError('%s() takes %s %d %s (%d given)' % (\n f_name, 'at least' if defaults else 'exactly', num_required,\n 'arguments' if num_required > 1 else 'argument', num_total))\n return arg2value\n\n def annotate(**kwargs):\n \"\"\"Decorator factory to simulate Python 3's annotation mechanism.\"\"\"\n def decorator(func):\n argspec = getfullargspec(func)\n for kw in kwargs:\n if (kw not in argspec.args and\n kw not in argspec.kwonlyargs and\n kw not in [argspec.varargs, argspec.varkw, \"return\"]):\n raise Exception(\n \"Invalid annotation ({0}={1}) for function {2}.\".\n format(kw, kwargs[kw], func.__name__))\n func.func_annotations = kwargs\n return func\n return decorator\n\n def kw_only(*args):\n \"\"\"Decorator factory to simulate Python 3's kw-only arguments without\n actually enforcing it.\"\"\"\n def decorator(func):\n argspec = getfullargspec(func)\n kw_args = (argspec.args[-len(argspec.defaults):]\n if argspec.defaults else [])\n for kw in args:\n if kw not in kw_args:\n raise Exception(\n \"Invalid kw-only annotation of argument {0} for \"\n \"function {1}\".format(kw, func.__name__))\n func.kw_only = dict((kw, argspec.defaults[kw_args.index(kw)])\n for kw in args)\n return func\n return decorator\n\n def wraps(func):\n \"\"\"Decorator factory that calls functools.wraps and also sets the\n __wrapped__ attribute.\n \"\"\"\n def wrapper_decorator(wrapper):\n decorated_wrapper = functools.wraps(func)(wrapper)\n decorated_wrapper.__wrapped__ = func\n return 
decorated_wrapper\n return wrapper_decorator\n\nGETS_RAW = \"_gets_raw\"\ndef gets_raw(func):\n \"\"\"Decorator indicating that the do_* method requires an unparsed line.\"\"\"\n setattr(func, GETS_RAW, True)\n return func\n\nUSE_MY_ANNOTATIONS = \"_use_my_annotations\"\ndef use_my_annotations(func):\n \"\"\"Decorator indicating that the annotations of the decorated method should\n be used.\n \"\"\"\n setattr(func, USE_MY_ANNOTATIONS, True)\n return func\n\nclass ArgListError(Exception):\n \"\"\"The argument list to the dispatched method could not be constructed.\"\"\"\n pass\n\nclass ParsedCmd(Cmd, object):\n \"\"\"An subclass of cmd.Cmd that can parse arguments.\"\"\"\n\n def __init__(self, **kwargs):\n show_usage = kwargs.pop(\"show_usage\", False)\n Cmd.__init__(self, **kwargs)\n self.show_usage = show_usage\n\n def onecmd(self, line):\n # initial parsing\n cmd, arg, line = self.parseline(line)\n if not line:\n return self.emptyline()\n if cmd is None:\n return self.default(line)\n self.lastcmd = line\n # find the method to which dispatch\n if cmd == \"\":\n return self.default(line)\n try:\n func = getattr(self, \"do_\" + cmd)\n except AttributeError:\n return self.default(line)\n inner_func = func\n while (hasattr(inner_func, \"__wrapped__\") and\n not getattr(inner_func, USE_MY_ANNOTATIONS, None)):\n inner_func = inner_func.__wrapped__\n try:\n args, kwargs = self.construct_arglist(arg, func, inner_func)\n except ArgListError as exc:\n callback, args = exc.args\n return callback(*args)\n return func(*args, **kwargs)\n onecmd.__doc__ = Cmd.onecmd.__doc__\n\n def split(self, line):\n \"\"\"Split the argument list.\"\"\"\n return [arg.replace(\"\\0\", \"\") for arg in shlex.split(line)]\n\n def construct_arglist(self, arg, func, inner_func):\n \"\"\"Construct *args and **kwargs to be passed to func from arg and\n inner_func's signature.\n \"\"\"\n if getattr(inner_func, GETS_RAW, None):\n return [arg], {}\n args = self.split(arg)\n argspec = 
getfullargspec(inner_func)\n kw_args = (argspec.args[-len(argspec.defaults):]\n if argspec.defaults else [])\n kw_only = (argspec.kwonlydefaults.copy()\n if argspec.kwonlydefaults else {})\n # args = [\"--kw\", opt, \"--kw\", opt, ..., val, val...]\n # -> args = [val, val, ...]\n # -> opts = {\"kw\": opt, \"kw\": opt, ...}\n while args and isinstance(args[0], basestring):\n kw = args[0].lstrip(\"-\")\n if kw in kw_only:\n try:\n kw_only[kw] = args[1]\n except IndexError:\n exc_s = \"Value not given for option.\"\n raise ArgListError(self.bind_error, (args, exc_s))\n args = args[2:]\n else:\n break\n if not inspect.ismethod(inner_func):\n args.insert(0, self)\n try:\n callargs = getcallargs(inner_func, *args, **kw_only)\n except TypeError as exc:\n exc_s = str(exc)\n raise ArgListError(self.bind_error, (args, exc_s))\n for varname in callargs:\n cast = argspec.annotations.get(varname)\n if not callable(cast):\n continue\n bound_val = callargs[varname]\n if varname == argspec.varargs:\n bound_val = list(bound_val)\n for i, arg in enumerate(bound_val):\n try:\n bound_val[i] = cast(arg)\n except Exception as exc:\n exc_s = str(exc)\n raise ArgListError(self.cast_error,\n (varname, arg, cast, exc_s))\n callargs[varname] = bound_val\n elif varname == argspec.varkw:\n for key, val in bound_val.items():\n try:\n bound_val[key] = cast(val)\n except Exception as exc:\n exc_s = str(exc)\n raise ArgListError(self.cast_error,\n (varname, arg, cast, exc_s))\n elif (bound_val ==\n (argspec.kwonlydefaults or {}).get(varname, object())):\n continue # same as given default, keyword-only\n elif (varname in kw_args and\n bound_val == argspec.defaults[kw_args.index(varname)]):\n continue # same as given default, non-keyword-only\n else:\n try:\n callargs[varname] = cast(bound_val)\n except Exception as exc:\n exc_s = str(exc)\n raise ArgListError(self.cast_error,\n (varname, bound_val, cast, exc_s))\n # reconstruct the argument list\n args = [callargs[varname]\n for varname in 
argspec.args if varname not in kw_args]\n if argspec.varargs:\n args.extend(callargs[argspec.varargs])\n kwargs = dict((varname, callargs[varname])\n for varname in kw_args + argspec.kwonlyargs)\n if argspec.varkw:\n kwargs.update(argspec.varkw)\n if inspect.ismethod(func):\n return args[1:], kwargs\n else:\n return args, kwargs\n\n def bind_error(self, args, exc):\n \"\"\"Called when the argument list does not match the method's\n signature.\"\"\"\n self.stdout.write(\n \"*** This argument list could not be bound: {}\\n*** {}\\n\".\n format(args, exc))\n\n def cast_error(self, varname, value, cast, exc):\n \"\"\"Called when an argument cannot be cast by the given caster.\"\"\"\n self.stdout.write(\n textwrap.fill('*** While trying to cast \"{0}\" with \"{1}\" for '\n 'argument \"{2}\", the following exception was '\n 'thrown:\\n'.format(value, cast, varname),\n 72, subsequent_indent=\"*** \"))\n self.stdout.write(\"*** {}\".format(exc))\n\n def do_help(self, cmd=None):\n Cmd.do_help(self, cmd)\n if not cmd or not self.show_usage:\n return\n do_ = getattr(self, \"do_\" + cmd, None)\n if not do_ or hasattr(self, \"help_ \" + cmd):\n return\n while (hasattr(do_, \"__wrapped__\") and\n not getattr(do_, USE_MY_ANNOTATIONS, None)):\n do_ = do_.__wrapped__\n if getattr(do_, GETS_RAW, None):\n return\n spec = getfullargspec(do_)\n args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec\n non_kw_args = args[:-len(defaults)] if defaults else args\n kw_args = args[-len(defaults):] if defaults else []\n if not defaults:\n defaults = []\n if not kwonlydefaults:\n kwonlydefaults = {}\n helpstr = \"\\t\" + cmd\n for arg, default in kwonlydefaults.items():\n helpstr += \" [-{0} {1}(={2})]\".format(arg, arg[0].upper(), default)\n for arg, default in zip(kw_args, defaults):\n helpstr += \" [{0}(={1})]\".format(arg.upper(), default)\n for arg in non_kw_args[1:]:\n helpstr += \" {0}\".format(arg.upper())\n if varargs:\n helpstr += \" [{0}]\".format(varargs.upper())\n 
self.stdout.write(helpstr + \"\\n\")\n\ndef boolean(s):\n \"\"\"A generalized boolean caster.\"\"\"\n return s.lower() not in [\"off\", \"false\", \"f\", \"0\"]\n" }, { "alpha_fraction": 0.5487493872642517, "alphanum_fraction": 0.5558958649635315, "avg_line_length": 30.095237731933594, "blob_id": "f1db995c0f8bbe1643d0ee28b442694a3eb7e10e", "content_id": "72bbfee907517099fc309956e942613059519e13", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1959, "license_type": "permissive", "max_line_length": 74, "num_lines": 63, "path": "/test/_test_py3.py", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "from io import StringIO\nfrom parsedcmd import *\n\nclass UI(ParsedCmd):\n def do_print(self, line=\"abc\", *, flag: boolean=True, repeat: int=1):\n \"\"\"Print a given string (defaults to \"abc\").\n Print nothing if -flag is set to false.\n Print multiple copies if -repeat N option is given.\n \"\"\"\n if flag:\n for i in range(repeat):\n print(line, file=self.stdout)\n\n def do_multiply(self, mul: int, *nums: int):\n \"\"\"Print `mul` times the numbers given.\n \"\"\"\n for num in nums:\n print(mul * num, file=self.stdout)\n\n @gets_raw\n def do_shell(self, line):\n \"\"\"Evaluates the given line.\n \"\"\"\n eval(line)\n\nclass Tests:\n def setup(self):\n self.out = StringIO()\n self.ui = UI(stdout=self.out, show_usage=True)\n\n def test_print(self):\n self.ui.onecmd(\"print\")\n assert self.out.getvalue().strip() == \"abc\"\n\n def test_print_repeat(self):\n self.ui.onecmd(\"print -repeat 3 def\")\n assert self.out.getvalue().strip() == \"def\\ndef\\ndef\"\n\n def test_print_flag(self):\n self.ui.onecmd(\"print -flag off -repeat 3 def\")\n assert self.out.getvalue().strip() == \"\"\n\n def test_help_print(self):\n self.ui.onecmd(\"help print\")\n assert (self.out.getvalue().strip() ==\n UI.do_print.__doc__ +\n \"\\n\\tprint [-flag F(=True)] [-repeat R(=1)] [LINE(=abc)]\")\n\n 
def test_multiply(self):\n self.ui.onecmd(\"multiply 4 1 2 3\")\n assert self.out.getvalue().strip() == \"4\\n8\\n12\"\n\n def test_help_multiply(self):\n self.ui.onecmd(\"?multiply\")\n assert (self.out.getvalue().strip() ==\n UI.do_multiply.__doc__ + \"\\n\\tmultiply MUL [NUMS]\")\n\n def test_shell(self):\n self.ui.onecmd(\"!print(1, file=self.stdout)\")\n assert self.out.getvalue().strip() == \"1\"\n\nif __name__ == \"__main__\":\n UI(show_usage=True).cmdloop()\n" }, { "alpha_fraction": 0.5732510089874268, "alphanum_fraction": 0.5798354148864746, "avg_line_length": 31.83783721923828, "blob_id": "9989cd2b63a47474f37029210d66527c2041570e", "content_id": "1f6d8a47ebd7cca769199bdc962898580e4516dc", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2430, "license_type": "permissive", "max_line_length": 75, "num_lines": 74, "path": "/test/_test_py2.py", "repo_name": "aeternocap/parsedcmd", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom StringIO import StringIO\nfrom parsedcmd import *\n\nclass UI(ParsedCmd):\n # Non-annotated arguments default to str.\n # boolean is a utility function, that casts every string to True,\n # except \"f\", \"false\", \"off\" and \"0\" (case-insensitive).\n @annotate(flag=boolean, repeat=int)\n @kw_only(\"flag\", \"repeat\")\n def do_print(self, line=\"abc\", flag=True, repeat=1):\n \"\"\"Print a given string (defaults to \"abc\").\n Print nothing if -flag is set to false.\n Print multiple copies if -repeat N option is given.\n \"\"\"\n if flag:\n for i in range(repeat):\n print(line, file=self.stdout)\n\n # *args can also be annotated.\n # Python 2's usual limitations about mixing keyword arguments and *args\n # applies.\n @annotate(mul=int, nums=int)\n def do_multiply(self, mul, *nums):\n \"\"\"Print `mul` times the numbers given.\n \"\"\"\n for num in nums:\n print(mul * num, file=self.stdout)\n\n # Do not parse the argument line for 
do_shell.\n @gets_raw\n def do_shell(self, line):\n \"\"\"Evaluates the given line.\n \"\"\"\n eval(line)\n\nclass Tests:\n def setup(self):\n self.out = StringIO()\n self.ui = UI(stdout=self.out, show_usage=True)\n\n def test_print(self):\n self.ui.onecmd(\"print\")\n assert self.out.getvalue().strip() == \"abc\"\n\n def test_print_repeat(self):\n self.ui.onecmd(\"print -repeat 3 def\")\n assert self.out.getvalue().strip() == \"def\\ndef\\ndef\"\n\n def test_print_flag(self):\n self.ui.onecmd(\"print -flag off -repeat 3 def\")\n assert self.out.getvalue().strip() == \"\"\n\n def test_help_print(self):\n self.ui.onecmd(\"?print\")\n assert (self.out.getvalue().strip() ==\n UI.do_print.__doc__ +\n \"\\n\\tprint [-flag F(=True)] [-repeat R(=1)] [LINE(=abc)]\")\n\n def test_multiply(self):\n self.ui.onecmd(\"multiply 4 1 2 3\")\n assert self.out.getvalue().strip() == \"4\\n8\\n12\"\n\n def test_help_multiply(self):\n self.ui.onecmd(\"?multiply\")\n assert (self.out.getvalue().strip() ==\n UI.do_multiply.__doc__ + \"\\n\\tmultiply MUL [NUMS]\")\n\n def test_shell(self):\n self.ui.onecmd(\"!print(1, file=self.stdout)\")\n assert self.out.getvalue().strip() == \"1\"\n\nif __name__ == \"__main__\":\n UI(show_usage=True).cmdloop()\n" } ]
6
zihangdai/pytorch_xworld
https://github.com/zihangdai/pytorch_xworld
dc1b0ca1537eead0e62fc84480b9ebc1e4acb59c
03d63e9d702ffb687e672f41d82632fc18e80ff7
d92f9e271004d0d424835d2b726c8f17bdd0a11d
refs/heads/master
2021-01-21T04:26:22.263826
2017-09-26T23:12:33
2017-09-26T23:12:33
101,913,275
6
3
null
2017-08-30T18:03:38
2017-08-31T06:32:25
2017-09-26T23:12:34
Python
[ { "alpha_fraction": 0.6253525018692017, "alphanum_fraction": 0.6350007653236389, "avg_line_length": 39.5843391418457, "blob_id": "c11f4e382eafa90ed842891298d70d5ad2147a73", "content_id": "824ec3e942c2563cc8826ff6b7249ac5372c637f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6737, "license_type": "no_license", "max_line_length": 158, "num_lines": 166, "path": "/evaluate.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import sys, os\nimport argparse\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport torch.multiprocessing as mp\n\nimport torchvision.utils as vutils\nimport torchvision.transforms as transforms\n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nfrom py_simulator import Simulator\nfrom config import xworld_config\nfrom model import Agent\nfrom vocab import Vocab\nfrom replay_memory import *\nfrom functions import *\nfrom utils import *\n\nconfig = xworld_config()\nfig, axes = plt.subplots(6,5, figsize=(36, 20))\nfig.tight_layout(pad=5, w_pad=2, h_pad=5)\n\ndef visualize_spatial_attn(save_dir, info):\n image = opencv_to_rgb(info['image'])\n image[:,:,6*12+3:7*12-3,6*12+3:7*12-3] = image.min()\n vutils.save_image(image, os.path.join(save_dir, 'image.png'), nrow=6, pad_value=1, padding=1)\n\n env_map_vis = vis_scale_image(info['env_map'].data.cpu(), config.pixel_per_grid)\n vutils.save_image(env_map_vis, os.path.join(save_dir, 'env_map.png'), nrow=6, pad_value=1, padding=1)\n\n grid_attn_vis = vis_scale_image(info['grid_attns'].data.cpu(), config.pixel_per_grid)\n vutils.save_image(grid_attn_vis, os.path.join(save_dir, 'grid_attn.png'), normalize=False, scale_each=False, nrow=6, pad_value=1, padding=1)\n\n heatmap_vis = 
vis_scale_image(info['heatmaps'].data.cpu(), config.pixel_per_grid)\n vutils.save_image(heatmap_vis, os.path.join(save_dir, 'heatmap.png'), normalize=False, scale_each=False, nrow=6, pad_value=1, padding=1)\n\n cached_attn_vis = vis_scale_image(info['cached_attns'].data.cpu(), config.pixel_per_grid)\n vutils.save_image(cached_attn_vis, os.path.join(save_dir, 'cached_attn.png'), normalize=False, scale_each=False, nrow=6, pad_value=1, padding=1)\n\ndef visualize_sequence_attn(save_dir, vocab, info, axes, fig, task):\n clear_axes(axes)\n seq_attns = info['seq_attns'].permute(1, 0, 2).cpu().numpy() # [batch x step x seqlen]\n sequence = info['question'].numpy() if task == 'rec' else info['command'].numpy()\n for idx in range(seq_attns.shape[0]):\n i, j = idx / 5, idx % 5\n seq_attn = seq_attns[idx]\n xticks = vocab.convert_to_sym(sequence[:,idx])\n yticks = ['step {}'.format(step) for step in range(seq_attn.shape[0])]\n if task == 'rec':\n answer, predict = vocab.convert_to_sym([info['answer'][idx], info['predict'][idx]])\n axes[i,j].set_title('A: {} <-> P: {}'.format(answer, predict))\n vis_seq_attn(axes[i,j], seq_attn, xticks, yticks)\n fig.savefig(os.path.join(save_dir, 'seq_attn.png'), format='png')\n\ndef mkdir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\ndef visualize_all_steps(nav_info_list, vocab):\n save_dir = os.path.join('evaluate', str(time.time()))\n mkdir(save_dir)\n \n info = {}\n info['image'] = torch.cat([nav_info['image'] for nav_info in nav_info_list])\n info['env_map'] = torch.cat([nav_info['env_map'] for nav_info in nav_info_list])\n info['grid_attns'] = torch.cat([nav_info['grid_attns'][-1] for nav_info in nav_info_list])\n info['heatmaps'] = torch.cat([nav_info['heatmaps'][-1] for nav_info in nav_info_list])\n info['cached_attns'] = torch.cat([nav_info['cached_attns'][-1] for nav_info in nav_info_list])\n visualize_spatial_attn(save_dir, info)\n\n info['seq_attns'] = 
torch.cat([torch.stack(nav_info['seq_attns']).data.cpu() for nav_info in nav_info_list], dim=1)\n info['command'] = torch.cat([nav_info['command'] for nav_info in nav_info_list], dim=1)\n visualize_sequence_attn(save_dir, vocab, info, axes, fig, task='nav')\n\ndef run_episode(env, act_net, vocab):\n env.reset_game()\n command = None\n success_flag = 0.\n\n rewards = []\n nav_info_list = []\n while True:\n curr_state = xwd_get_state(config, env, vocab, command)\n\n done = env.game_over()\n if done != 'alive':\n if config.show_screen: env.show_screen()\n episode_return = sum(rewards)\n\n if 'success' not in done and len(nav_info_list) > 0:\n if config.vis_fail: visualize_all_steps(nav_info_list, vocab)\n else:\n success_flag = 1.\n\n break\n\n image, command, question, answer = curr_state\n\n if command is None:\n action = xwd_random_step(env)\n else:\n if config.show_screen: env.show_screen()\n action, nav_info = act_net(variable(image), command=variable(command), act_only=True)\n action = action.data[0,0]\n reward = env.take_actions({'action': action, 'pred_sentence': ''})\n rewards.append(float(reward))\n\n nav_info['image'] = image\n nav_info['command'] = command \n nav_info_list.append(nav_info)\n\n return episode_return, success_flag\n\ndef main():\n checkpoint = torch.load(latest_checkpoint(os.path.join(config.load_dir, 'checkpoint')))\n vocab = checkpoint['vocab']\n\n model = Agent(config, vocab.size())\n model.load_state_dict(checkpoint['model'])\n\n env = Simulator.create(config.env_name, {'conf_path':config.eval_conf_path, 'curriculum':0, \n 'task_mode':'arxiv_lang_acquisition'})\n if config.cuda:\n model.cuda()\n model.eval()\n\n total_return, total_success = 0., 0.\n\n for i in range(config.eval_episode):\n ep_return, ep_success= run_episode(env, model, vocab)\n total_return += ep_return\n total_success += ep_success\n\n print('Average episode return: {:.4f}; Success rate {:.3f}'.format(float(total_return) / config.eval_episode, float(total_success) / 
config.eval_episode))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch xworld evaluation')\n parser.add_argument('--eval_episode', type=int, default=100, metavar='N',\n help='number of episodes to run')\n parser.add_argument('--load_dir', type=str, default='log_xworld', \n help='directory to load checkpoints')\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\n parser.add_argument('--show_screen', action='store_true', default=False,\n help='shows screen for sanity check')\n parser.add_argument('--vis_fail', action='store_true', default=False,\n help='visualize failure cases')\n args = parser.parse_args()\n\n for k, v in args.__dict__.items():\n setattr(config, k, v)\n\n global variable\n variable = create_variable_func(config.cuda)\n \n main()\n" }, { "alpha_fraction": 0.5242403149604797, "alphanum_fraction": 0.5256320834159851, "avg_line_length": 27.739999771118164, "blob_id": "c31e2833cdb72d94117194a74317c26e0761f75a", "content_id": "2812c8e45a1dc0f9783abf67723b2674648a3f9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4311, "license_type": "no_license", "max_line_length": 97, "num_lines": 150, "path": "/vocab.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import torch\n\nclass Vocab(object):\n def __init__(self, data=None, lower=False):\n self.idx_to_sym = {}\n self.sym_to_idx = {}\n self.frequencies = {}\n self.lower = lower\n\n # Special entries will not be pruned.\n self.special = []\n\n if data is not None:\n if type(data) == str:\n self.load_file(data)\n else:\n self.add_specials(data)\n\n def size(self):\n return len(self.idx_to_sym)\n\n # Load entries from a file.\n def load_file(self, filename):\n for line in open(filename):\n fields = line.split()\n sym = fields[0]\n idx = int(fields[1])\n self.add(sym, idx)\n\n # Write entries to a file.\n def write_file(self, filename):\n 
with open(filename, 'w') as file:\n for i in range(self.size()):\n sym = self.idx_to_sym[i]\n file.write('%s %d\\n' % (sym, i))\n\n file.close()\n\n def lookup(self, key, default=None):\n key = key.lower() if self.lower else key\n try:\n return self.sym_to_idx[key]\n except KeyError:\n return default\n\n def get_sym(self, idx, default=None):\n try:\n return self.idx_to_sym[idx]\n except KeyError:\n return default\n\n # Mark this `sym` and `idx` as special (i.e. will not be pruned).\n def add_special(self, sym, idx=None):\n idx = self.add(sym, idx)\n self.special += [idx]\n\n # Mark all syms in `syms` as specials (i.e. will not be pruned).\n def add_specials(self, syms):\n for sym in syms:\n self.add_special(sym)\n\n # Add `sym` in the dictionary. Use `idx` as its index if given.\n def add(self, sym, idx=None):\n if idx is not None:\n self.idx_to_sym[idx] = sym\n self.sym_to_idx[sym] = idx\n else:\n if sym in self.sym_to_idx:\n idx = self.sym_to_idx[sym]\n else:\n idx = len(self.idx_to_sym)\n self.idx_to_sym[idx] = sym\n self.sym_to_idx[sym] = idx\n\n if idx not in self.frequencies:\n self.frequencies[idx] = 1\n else:\n self.frequencies[idx] += 1\n\n return idx\n\n \n # Return a new dictionary with vocabs >= min_freq.\n def prune_by_freq(self, min_freq):\n # Sort by frequency\n freq = torch.Tensor(\n [self.frequencies[i] for i in range(len(self.frequencies))])\n sort_freq, idx = torch.sort(freq, 0, True)\n\n new_dict = Vocab()\n\n # Add special entries in all cases.\n for i in self.special:\n new_dict.add_special(self.idx_to_sym[i])\n\n for f, i in zip(sort_freq, idx):\n if f < min_freq:\n break\n new_dict.add(self.idx_to_sym[i])\n\n return new_dict\n\n if size >= self.size():\n return self\n\n # Only keep the `size` most frequent entries.\n freq = torch.Tensor(\n [self.frequencies[i] for i in range(len(self.frequencies))])\n _, idx = torch.sort(freq, 0, True)\n\n new_dict = Vocab(lower=self.lower)\n\n # Add special entries in all cases.\n for i in self.special:\n 
new_dict.add_special(self.idx_to_sym[i])\n\n for i in idx[:size]:\n new_dict.add(self.idx_to_sym[i])\n\n return new_dict\n\n # Convert `symbols` to indices (LongTensor). Use `unk` if not found.\n # Optionally insert `bos` at the beginning and `eos` at the .\n def convert_to_idx(self, syms, unk, bos=None, eos=None):\n vec = []\n\n if bos is not None:\n vec += [self.lookup(bos)]\n\n unk = self.lookup(unk)\n # vec += [self.lookup(sym, default=unk) for sym in syms]\n for sym in syms:\n idx = self.lookup(sym, default=unk)\n vec.append(idx)\n\n if eos is not None:\n vec += [self.lookup(eos)]\n\n return torch.LongTensor(vec)\n\n # Convert list of indices to list of syms. If index `stop` is reached, convert it and return.\n def convert_to_sym(self, idx, stop=None):\n syms = []\n\n for i in idx:\n syms.append(self.get_sym(i))\n if stop is not None and i == stop:\n break\n\n return syms\n" }, { "alpha_fraction": 0.5546624064445496, "alphanum_fraction": 0.5803858637809753, "avg_line_length": 24.91666603088379, "blob_id": "634cb22d4a64db1f18f7b30b355b9144df52860d", "content_id": "7643b5ab7e4d795f54e127ff8a89f868e58ed4d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/utils/rl_utils.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\n\ndef reward_to_value(rewards, gamma, baseline=None):\n values = []\n R = 0.\n for r in rewards[::-1]:\n R = r + gamma * R\n values.insert(0, R)\n if baseline:\n values = [v - baseline for v in values]\n\n return values\n\ndef copy_state(m1, m2):\n \"\"\"\n Copy all parameters and states from m1 to m2.\n \n Often used for updating the target value network.\n \"\"\"\n for p1, p2 in zip(m1.parameters(), m2.parameters()):\n p2.data.copy_(p1.data)\n for key in m1._buffers.keys():\n m2._buffers[key].data.copy_(m1._buffers[key].data)\n" }, { 
"alpha_fraction": 0.6075271964073181, "alphanum_fraction": 0.6159618496894836, "avg_line_length": 43.07948684692383, "blob_id": "69f0f522fcd917405daff112e8ec6cd8566c5153", "content_id": "ef93bd3539014609e08a5f28e3a39141e0411c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17191, "license_type": "no_license", "max_line_length": 148, "num_lines": 390, "path": "/trainer.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division\nimport sys, os\nimport argparse\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport torch.multiprocessing as mp\n\nimport torchvision.utils as vutils\nimport torchvision.transforms as transforms\n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nfrom py_simulator import Simulator\nfrom config import xworld_config\nfrom model import Agent\nfrom vocab import Vocab\nfrom replay_memory import *\nfrom functions import *\nfrom utils import *\n\nconfig = xworld_config()\n\nrec_fig, rec_axes = plt.subplots(4,4, figsize=(24, 15))\nrec_fig.tight_layout(pad=5, w_pad=2, h_pad=5)\n\nnav_fig, nav_axes = plt.subplots(4,4, figsize=(24, 15))\nnav_fig.tight_layout(pad=5, w_pad=2, h_pad=5)\n\ndef train_recognition(act_net, rec_memory):\n # ========== Supervised Learning ==========\n rec_batch = rec_memory.sample(config.batch_size)\n rec_image, question, answer = rec_batch\n\n rec_logit, rec_info = act_net(variable(rec_image), question=variable(question))\n ce_loss = F.cross_entropy(rec_logit, variable(answer))\n\n predict = rec_logit.max(1, **kwargs)[1].squeeze(1).data.cpu()\n rec_accu = torch.eq(predict, answer).float().mean()\n\n # ========== Monitoring ==========\n rec_info['rec_accu'] = rec_accu\n rec_info['image'] = 
rec_image\n rec_info['question'] = question\n rec_info['predict'] = predict\n rec_info['answer'] = answer\n\n return ce_loss, rec_info\n\ndef train_prioritize(act_net, tgt_net, nav_memory):\n # ========== Reinforcement Learning ===========\n nav_batch, nav_weight, nav_indices = nav_memory.sample(config.batch_size, config.beta)\n curr_state, action, next_state, reward = nav_batch\n nav_weight = variable(nav_weight.unsqueeze(1))\n \n curr_image, curr_command = curr_state\n _, action_prob, curr_value, nav_info = act_net(variable(curr_image), command=variable(curr_command))\n\n next_image, next_command = next_state\n next_value, _ = tgt_net(variable(next_image, volatile=True), command=variable(next_command, volatile=True), val_only=True)\n next_value.volatile = False\n\n # This essentially implements the Huber loss\n target_value = config.gamma * next_value + variable(reward)\n td_error = torch.clamp(target_value - curr_value, -1., 1.).detach()\n td_loss = torch.mean(-td_error * curr_value * nav_weight)\n\n log_prob = torch.log(1e-6 + action_prob)\n act_log_prob = log_prob.gather(1, variable(action.unsqueeze(1)))\n pg_loss = torch.mean(-td_error * act_log_prob * nav_weight)\n\n nav_priorities = (torch.abs(td_error.squeeze(1)).data + 1e-6).cpu().tolist()\n nav_memory.update_priorities(nav_indices, nav_priorities)\n\n # ========== Monitoring ==========\n nav_info['command'] = curr_command\n nav_info['image'] = curr_image\n nav_info['weight'] = nav_weight\n nav_info['td_error'] = td_error.data.abs().mean()\n\n return td_loss, pg_loss, nav_info\n\ndef train_standard(act_net, tgt_net, nav_memory):\n # ========== Reinforcement Learning ===========\n curr_state, action, next_state, reward = nav_memory.sample(config.batch_size)\n \n curr_image, curr_command = curr_state\n _, action_prob, curr_value, nav_info = act_net(variable(curr_image), command=variable(curr_command))\n\n next_image, next_command = next_state\n next_value, _ = tgt_net(variable(next_image, volatile=True), 
command=variable(next_command, volatile=True), val_only=True)\n next_value.volatile = False\n\n # This essentially implements the Huber loss\n target_value = variable(reward) + config.gamma * next_value\n td_error = torch.clamp(target_value - curr_value, -1., 1.).detach()\n td_loss = torch.mean(-td_error * curr_value)\n\n log_prob = torch.log(1e-6 + action_prob)\n act_log_prob = log_prob.gather(1, variable(action.unsqueeze(1)))\n pg_loss = torch.mean(-td_error * act_log_prob)\n\n # ========== Monitoring ==========\n nav_info['command'] = curr_command\n nav_info['image'] = curr_image\n nav_info['td_error'] = td_error.data.abs().mean()\n\n return td_loss, pg_loss, nav_info\n\ndef visualize_spatial_attn(save_dir, info, task):\n vutils.save_image(opencv_to_rgb(info['image'].cpu()), os.path.join(save_dir, 'image.png'), nrow=4, pad_value=1, padding=1)\n if task == 'nav':\n env_map_vis = vis_scale_image(info['env_map'].data.cpu(), config.pixel_per_grid)\n vutils.save_image(env_map_vis, os.path.join(save_dir, 'env_map.png'), normalize=False, scale_each=False, nrow=4, pad_value=1, padding=1)\n\n grid_attn_vis = vis_scale_image(info['grid_attns'][-1].data.cpu(), config.pixel_per_grid)\n vutils.save_image(grid_attn_vis, os.path.join(save_dir, 'grid_attn.png'), normalize=False, scale_each=False, nrow=4, pad_value=1, padding=1)\n\n heatmap_vis = vis_scale_image(info['heatmaps'][-1].data.cpu(), config.pixel_per_grid)\n vutils.save_image(heatmap_vis, os.path.join(save_dir, 'heatmap.png'), normalize=False, scale_each=False, nrow=4, pad_value=1, padding=1)\n\n cached_attn_vis = vis_scale_image(info['cached_attns'][-1].data.cpu(), config.pixel_per_grid)\n vutils.save_image(cached_attn_vis, os.path.join(save_dir, 'cached_attn.png'), normalize=False, scale_each=False, nrow=4, pad_value=1, padding=1)\n\ndef visualize_sequence_attn(save_dir, vocab, info, axes, fig, task):\n clear_axes(axes)\n seq_attns = torch.stack(info['seq_attns']).data.permute(1, 0, 2).cpu().numpy() # [batch x step 
x seqlen]\n sequence = info['question'].numpy() if task == 'rec' else info['command'].numpy()\n for idx in range(seq_attns.shape[0]):\n i, j = idx // 4, idx % 4\n seq_attn = seq_attns[idx]\n xticks = vocab.convert_to_sym(sequence[:,idx])\n yticks = ['step {}'.format(step) for step in range(seq_attn.shape[0])]\n if task == 'rec':\n answer, predict = vocab.convert_to_sym([info['answer'][idx], info['predict'][idx]])\n axes[i,j].set_title('A: {} <-> P: {}'.format(answer, predict))\n vis_seq_attn(axes[i,j], seq_attn, xticks, yticks)\n fig.savefig(os.path.join(save_dir, 'seq_attn.png'), format='png')\n\ndef train_model(act_net, tgt_net, rec_memory, nav_memory, optimizer, vocab, vis=False):\n act_net.train()\n # ========== Forward computation ==========\n if config.prioritize:\n td_loss, pg_loss, nav_info = train_prioritize(act_net, tgt_net, nav_memory)\n else:\n td_loss, pg_loss, nav_info = train_standard(act_net, tgt_net, nav_memory)\n\n ce_loss, rec_info = train_recognition(act_net, rec_memory)\n\n # ========== Optimization & Monitoring ==========\n if config.monitor_gnorm:\n optimizer.zero_grad()\n td_loss.backward(retain_variables=True)\n gnorm_td = grad_norm(act_net.parameters())\n\n optimizer.zero_grad()\n pg_loss.backward(retain_variables=True)\n gnorm_pg = grad_norm(act_net.parameters())\n\n optimizer.zero_grad()\n ce_loss.backward(retain_variables=True)\n gnorm_ce = grad_norm(act_net.parameters())\n \n tot_loss = td_loss + pg_loss + ce_loss\n \n optimizer.zero_grad()\n tot_loss.backward()\n optimizer.step()\n\n if config.monitor_gnorm:\n gnorm_tot = grad_norm(act_net.parameters())\n\n if vis:\n # ========== recognition task ==========\n visualize_spatial_attn(os.path.join(config.save_dir, 'vis_rec'), rec_info, task='rec')\n\n visualize_sequence_attn(os.path.join(config.save_dir, 'vis_rec'), vocab, rec_info, rec_axes, rec_fig, task='rec')\n\n # ========== navigation task ==========\n visualize_spatial_attn(os.path.join(config.save_dir, 'vis_nav'), nav_info, 
task='nav')\n\n visualize_sequence_attn(os.path.join(config.save_dir, 'vis_nav'), vocab, nav_info, nav_axes, nav_fig, task='nav')\n\n return_dict = {}\n return_dict.update({'loss ce' : ce_loss.data[0], 'rec accu' : rec_info['rec_accu']})\n return_dict.update({'loss td' : td_loss.data[0], 'loss pg' : pg_loss.data[0], 'abs td error':nav_info['td_error']})\n\n if config.monitor_mask:\n return_dict['rec mask mean'] = rec_info['rec_mask'].data.mean()\n return_dict['rec mask std'] = rec_info['rec_mask'].data.std(1).mean()\n for step in range(config.program_steps):\n return_dict['mask {} std'.format(step)] = rec_info['masks'][step].data.std(1).mean()\n return_dict['mask {} mean'.format(step)] = rec_info['masks'][step].data.mean()\n return_dict['sigma {} mean'.format(step)] = rec_info['sigmas'][step].data.mean()\n\n if config.monitor_gnorm:\n return_dict.update({'gnorm ce' : gnorm_ce})\n return_dict.update({'gnorm td' : gnorm_td, 'gnorm pg' : gnorm_pg})\n return_dict.update({'gnorm tot': gnorm_tot})\n\n return return_dict\n\ndef evaluate(env, act_net, vocab):\n act_net.eval()\n\n env.reset_game()\n command = None\n\n rewards = []\n while True:\n curr_state = xwd_get_state(config, env, vocab, command)\n if config.show_screen: env.show_screen()\n\n done = env.game_over()\n if done != 'alive':\n if config.show_screen: env.show_screen()\n print('=====> Eval episode done with status {} and total return {}'.format(done, sum(rewards)))\n break\n\n image, command, question, answer = curr_state\n\n if command is None:\n action = xwd_random_step(env)\n else:\n action, _ = act_net(variable(image, volatile=True), command=variable(command, volatile=True), act_only=True)\n reward = env.take_actions({'action': action.data[0,0], 'pred_sentence': ''})\n rewards.append(float(reward))\n\ndef main():\n \n env = Simulator.create(config.env_name, {'conf_path':config.conf_path, 'curriculum':config.curriculum,\n 'task_mode':'arxiv_lang_acquisition'})\n\n eval_env = 
Simulator.create(config.env_name, {'conf_path':config.eval_conf_path, 'curriculum':0, \n 'task_mode':'arxiv_lang_acquisition'})\n \n log_experiment_config(config)\n create_log_dir(root_dir=config.save_dir, sub_dirs=['checkpoint', 'vis_rec', 'vis_nav'])\n\n vocab = load_vocab(config.vocab_dir, special_syms=['$', '#oov#'])\n if config.prioritize:\n nav_memory = PrioritizedReplayMemory(capacity=config.replay_size, pack_func=pack_batch_nav)\n else:\n nav_memory = ReplayMemory(capacity=config.replay_size, pack_func=pack_batch_nav)\n rec_memory = ReplayMemory(capacity=config.replay_size//4, pack_func=pack_batch_rec)\n \n act_net = Agent(config, vocab.size())\n tgt_net = Agent(config, vocab.size())\n\n if config.cuda:\n act_net.cuda()\n tgt_net.cuda()\n copy_state(act_net, tgt_net)\n print('=====> Update target network; Current explore alpha {:.3f}'.format(act_net.alpha))\n \n if config.algo == 'rmsprop':\n optimizer = torch.optim.RMSprop(act_net.parameters(), lr=config.lr, momentum=config.mom, weight_decay=config.w_decay)\n elif config.algo == 'adagrad':\n optimizer = torch.optim.Adagrad(act_net.parameters(), lr=config.lr, weight_decay=config.w_decay)\n elif config.algo == 'adam':\n optimizer = torch.optim.Adam(act_net.parameters(), lr=config.lr, betas=(config.mom, 0.9999), weight_decay=config.w_decay)\n else:\n raise ValueError('Unsupported optimization algorithm {}. Please choose from [\"rmsprop\", \"adagrad\", \"adam\"].'.format(config.algo))\n\n monitor, done_states = Monitor(track_time=True), Monitor(count=False, default_val=0)\n \n train_cnt, frame_cnt = 0, 0\n for eidx in range(config.max_episode):\n # Annealing hyper-parameters\n act_net.alpha = max(0., config.alpha * (1. - float(frame_cnt) / float(config.explore_frame)))\n tgt_net.alpha = max(0., config.alpha * (1. - float(frame_cnt) / float(config.explore_frame)))\n config.beta = min(1., config.beta0 + float(eidx) / float(config.max_episode / 2) * (1. 
- config.beta0))\n\n # Get initial state\n env.reset_game()\n command = None\n\n # Ignore the first few empty frames without any instruction\n curr_state = xwd_get_state(config, env, vocab, command)\n\n # Episode loop\n rewards = []\n while True:\n # If not alive, exit the loop\n done = env.game_over()\n if done != 'alive':\n values = reward_to_value(rewards, config.gamma)\n \n if len(values) > 0:\n monitor.update('emp_value', values[0])\n monitor.update('steps', len(rewards))\n done_states.update(done, 1)\n\n break\n\n # Unpack the current state into specific\n image, command, question, answer = curr_state\n\n # Push image, question, answer into the recognition memory\n # - NOTE: Recognition task is treated as an independent channel at this moment.\n # As a result, as long as the current state and question and answer, we\n # record an entry in the memory\n if question is not None and answer is not None:\n entry = image, question, answer\n rec_memory.push(entry)\n\n # When the command is None, it means the navigation task has not started yet.\n # In this case, just take a random step\n if command is None:\n action = xwd_random_step(env)\n else:\n action, _ = act_net(variable(image, volatile=True), command=variable(command, volatile=True), act_only=True)\n action = action.data[0,0]\n reward = env.take_actions({'action': action, 'pred_sentence': ''})\n rewards.append(float(reward))\n frame_cnt += 1\n\n # Get the next state\n next_state = xwd_get_state(config, env, vocab, command)\n\n # Store an effective navigation transition in navigation memory\n if command is not None:\n entry = (curr_state[0], curr_state[1]), action, (next_state[0], next_state[1]), reward\n nav_memory.push(entry)\n\n # Move to the next state\n curr_state = next_state\n \n # Train the act_net with a mini-batch sampled from the replay memory\n if frame_cnt > 0 and frame_cnt % config.train_interval == 0 and \\\n len(nav_memory) >= config.init_size and \\\n len(rec_memory) >= config.init_size//4:\n\n 
iter_vals = train_model(act_net, tgt_net, rec_memory, nav_memory, optimizer, vocab, vis=(train_cnt % config.log_interval == 0))\n monitor.update_dict(iter_vals)\n train_cnt += 1\n \n # Log training info\n if train_cnt > 0 and train_cnt % config.log_interval == 0 and frame_cnt % config.train_interval == 0:\n disp_str = '#{} {}'.format(train_cnt, eidx)\n disp_str += monitor.disp(reset=True)\n disp_str += done_states.disp(reset=True)\n\n print(disp_str)\n\n # Update target network parameters\n if train_cnt > 0 and train_cnt % 2000 == 0 and frame_cnt % config.train_interval == 0:\n copy_state(act_net, tgt_net)\n print('=====> Update target network; Current explore alpha {:.3f}'.format(act_net.alpha))\n\n # Sanity check\n if eidx > 0 and eidx % config.eval_interval == 0:\n save_checkpoint(os.path.join(config.save_dir, 'checkpoint', 'chk.ep{}'.format(eidx)), act_net, optimizer, vocab)\n evaluate(eval_env, act_net, vocab)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch xworld training')\n parser.add_argument('--max_episode', type=int, default=500000, metavar='N',\n help='maximum episode (default: 500000)')\n parser.add_argument('--log_interval', type=int, default=200, metavar='N',\n help='interval between training status logs (default: 200)')\n parser.add_argument('--eval_interval', type=int, default=500, metavar='N',\n help='interval between evaluations (default: 500)')\n parser.add_argument('--batch_size', type=int, default=16, metavar='N',\n help='mini batch size used for training(default: 16)')\n parser.add_argument('--save_dir', type=str, default='log_xworld', \n help='directory to save intermediate results')\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\n parser.add_argument('--prioritize', action='store_true', default=False,\n help='enables prioritized memory replay')\n parser.add_argument('--show_screen', action='store_true', default=False,\n help='shows screen for sanity 
check')\n args = parser.parse_args()\n\n for k, v in args.__dict__.items():\n setattr(config, k, v)\n \n global variable\n variable = create_variable_func(config.cuda)\n\n main()\n" }, { "alpha_fraction": 0.7254902124404907, "alphanum_fraction": 0.7254902124404907, "avg_line_length": 24.5, "blob_id": "1eb912b890be918fef2151ec3d850779a54cc80f", "content_id": "d4accc7c584c4e7c036a4c4cff179b1bcc904926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/utils/__init__.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "from .xworld_utils import *\nfrom .vis_utils import *\nfrom .exp_utils import *\nfrom .rl_utils import *\n" }, { "alpha_fraction": 0.580520749092102, "alphanum_fraction": 0.5849565863609314, "avg_line_length": 30.04790496826172, "blob_id": "ca53652702ea5e396058af13e099416423beeb92", "content_id": "44ae00e15d37057f09b5d6840abd2bfbbfc5d58b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5185, "license_type": "no_license", "max_line_length": 78, "num_lines": 167, "path": "/replay_memory.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom collections import namedtuple\nimport random\n\nfrom segment_tree import SumSegmentTree, MinSegmentTree\n\nclass Experience(object):\n def __init__(self, state, action, reward, done):\n \"\"\"\n state : tuple : image and command\n action : tensor : action taken\n reward : float : immediate reward after taking the action\n done : bool : whether the episode ends after taking the action\n \"\"\"\n self.state = state\n self.action = action\n self.reward = reward\n self.done = done\n\nclass ReplayMemory(object):\n def __init__(self, capacity, pack_func=None):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n 
self.pack_func = pack_func\n\n def push(self, transition):\n \"\"\"Saves a transition.\"\"\"\n if self.position >= len(self.memory):\n self.memory.append(transition)\n else:\n self.memory[self.position] = transition\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n \"\"\"Packs a batch\"\"\"\n batch = random.sample(self.memory, batch_size)\n if self.pack_func is not None:\n batch = self.pack_func(batch)\n return batch\n\n def __len__(self):\n return len(self.memory)\n\nclass PrioritizedReplayMemory(object):\n def __init__(self, capacity, alpha=0.6, pack_func=None):\n self.capacity = capacity\n\n assert alpha > 0\n self._alpha = alpha\n\n self.pack_func = pack_func\n\n self.memory = []\n self.position = 0\n\n it_capacity = 1\n while it_capacity < capacity:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def push(self, transition):\n \"\"\"Saves a transition.\"\"\"\n if self.position >= len(self.memory):\n self.memory.append(transition)\n else:\n self.memory[self.position] = transition\n\n self._it_sum[self.position] = self._max_priority ** self._alpha\n self._it_min[self.position] = self._max_priority ** self._alpha\n\n self.position = (self.position + 1) % self.capacity\n\n def _sample_proportional(self, batch_size):\n indices = []\n while len(indices) < batch_size:\n mass = random.random() * self._it_sum.sum(0, len(self.memory) - 1)\n idx = self._it_sum.find_prefixsum_idx(mass)\n if idx not in indices:\n indices.append(idx)\n return indices\n\n def sample(self, batch_size, beta):\n \"\"\"Packs a batch\"\"\"\n indices = self._sample_proportional(batch_size)\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self.memory)) ** (-beta)\n\n batch = []\n for idx in indices:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self.memory)) ** (-beta)\n 
weights.append(weight / max_weight)\n batch.append(self.memory[idx])\n weights = torch.Tensor(weights)\n\n if self.pack_func is not None:\n batch = self.pack_func(batch)\n return batch, weights, indices\n\n def update_priorities(self, indices, priorities):\n \"\"\"Update priorities of sampled transitions.\n\n sets priority of transition at index indices[i] in buffer\n to priorities[i].\n\n Parameters\n ----------\n indices: [int]\n List of indices of sampled transitions\n priorities: [float]\n List of updated priorities corresponding to\n transitions at the sampled indices denoted by\n variable `indices`.\n \"\"\"\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n assert priority > 0, priority\n assert 0 <= idx < len(self.memory)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)\n\n def __len__(self):\n return len(self.memory)\n\n\ndef pack_varlen_seqs(seqs):\n max_len = max(seq.size(0) for seq in seqs)\n out = seqs[0].new(max_len, len(seqs)).fill_(0)\n for i in range(len(seqs)):\n seqlen = seqs[i].size(0)\n out[:seqlen,i].copy_(seqs[i][:,0])\n\n return out\n\ndef pack_batch_nav(batch):\n def pack_state(state):\n image, command = state\n image = torch.cat(image, dim=0)\n command = pack_varlen_seqs(command)\n\n return image, command\n\n curr_state, action, next_state, reward = zip(*batch)\n\n curr_state = pack_state(zip(*curr_state))\n next_state = pack_state(zip(*next_state))\n action = torch.LongTensor(action)\n reward = torch.Tensor(reward)\n\n return curr_state, action, next_state, reward\n\ndef pack_batch_rec(batch):\n image, question, answer = zip(*batch)\n image = torch.cat(image, dim=0)\n answer = torch.cat(answer, dim=0)\n question = pack_varlen_seqs(question)\n\n return image, question, answer\n" }, { "alpha_fraction": 0.5767885446548462, "alphanum_fraction": 0.589984118938446, "avg_line_length": 34.7386360168457, 
"blob_id": "a39c97bfc9f52e8371d85f4aa3e47e192b966d52", "content_id": "60da5f684dc9ac5a028e671cdfdff9be60ac11df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6290, "license_type": "no_license", "max_line_length": 105, "num_lines": 176, "path": "/functions.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.utils.rnn as rnn_utils\n\nif torch.__version__ == '0.1.12_2':\n kwargs = {}\nelse:\n kwargs = {keepdim:True}\n\n#################### Math ####################\ndef entropy_from_prob(prob, dim=-1):\n return -torch.sum(torch.log(1e-6 + prob) * prob, dim=dim)\n\ndef log_sum_exp(logits):\n max_logits = logits.max(1, **kwargs)[0]\n\n return torch.log(1e-6 + (logits - max_logits.expand_as(logits)).exp().sum(1)) + max_logits.squeeze(1)\n\n#################### Attention ####################\ndef temporal_weigthed_avg(context, attn, batch_first=False):\n \"\"\"\n context : [batch x seqlen x dim] if batch_first else [seqlen x batch x dim]\n attn : [batch x seqlen]\n \"\"\"\n if not batch_first:\n context = context.transpose(0, 1) # batch x seqlen x dim\n avg_context = torch.bmm(attn.unsqueeze(1), context).squeeze(1) # batch x dim\n\n return avg_context\n\ndef spatial_weighted_avg(context, attn):\n \"\"\"\n context : [batch x dim x H x W]\n attn : [batch x 1 x H x W]\n \"\"\"\n \n avg_context = (context * attn.expand_as(context)) \\\n .sum(3, **kwargs).sum(2, **kwargs) \\\n .squeeze(3).squeeze(2) # batch x dim\n\n return avg_context\n\ndef dotprod_attention(context, query, mask=None):\n \"\"\"\n context : [seqlen x batch x dim]\n query : [ 1 x batch x dim]\n mask. 
: [seqlen x batch]\n \"\"\"\n context = context.transpose(0, 1) # batch x seqlen x dim\n query = query.squeeze(0).unsqueeze(2) # batch x dim x 1\n attn = torch.bmm(context, query).squeeze(2) # batch x seqlen\n if mask is not None:\n attn.data.masked_fill_(mask.t(), -float('inf'))\n attn_prob = F.softmax(attn)\n \n return attn_prob\n\ndef cosine_attention(context, query, mask=None):\n \"\"\"\n context : [seqlen x batch x dim]\n query : [ 1 x batch x dim]\n mask. : [seqlen x batch]\n \"\"\"\n \n context = context / (1e-6 + context.norm(p=2, dim=2, **kwargs)).expand_as(context)\n query = query / (1e-6 + query.norm(p=2, dim=2, **kwargs).expand_as(query))\n\n return dotprod_attention(context, query, mask=None)\n\n#################### RNN ####################\ndef bihidden_to_unihidden(h):\n \"\"\"\n Concat the final hidden states (fwd and bwd) of a bidirectional RNN \n to create a hidden state for a unidirecitonal RNN\n \"\"\"\n return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \\\n .transpose(1, 2).contiguous() \\\n .view(h.size(0) // 2, h.size(1), h.size(2) * 2)\n\ndef create_rnn_step(rnn):\n \"\"\"\n Given an RNN, create a corresponding one-step function\n \"\"\"\n def step_func(input, hid=None):\n rnn_step = F._functions.rnn.AutogradRNN(\n mode=rnn.mode, input_size=rnn.input_size, hidden_size=rnn.hidden_size, \n num_layers=rnn.num_layers, dropout=rnn.dropout,\n train=rnn.training, bidirectional=rnn.bidirectional)\n return rnn_step(input, rnn.all_weights, hid)\n\n return step_func\n\ndef check_decreasing(lengths):\n \"\"\"\n Check whether the lengths tensor are in descreasing order.\n - If true, return None\n - Else, return a decreasing lens with two mappings\n\n This is used for variable length RNN\n \"\"\"\n lens, order = torch.sort(lengths, 0, True) \n if torch.ne(lens, lengths).sum() == 0:\n return None\n else:\n _, rev_order = torch.sort(order)\n\n return lens, Variable(order), Variable(rev_order)\n\ndef rnn_varlen(rnn, seq, emb, hidden=None):\n \"\"\"\n 
Process a sequential embedding (`emb`) using a variable length RNN,\n where the LongTensor (`seq`) is passed in to compute the mask and lengths\n \"\"\"\n padmask = seq.data.eq(0)\n lengths = seq.data.ne(0).sum(0, **kwargs).squeeze(0)\n check_res = check_decreasing(lengths)\n\n if check_res is None:\n packed_emb = rnn_utils.pack_padded_sequence(emb, lengths.tolist())\n packed_out, hidden_final = rnn(packed_emb, hidden)\n outputs, srclens = rnn_utils.pad_packed_sequence(packed_out)\n else:\n lens, order, rev_order = check_res\n packed_emb = rnn_utils.pack_padded_sequence(emb.index_select(1, order), lens.tolist())\n packed_out, hidden_final = rnn(packed_emb, hidden)\n outputs, srclens = rnn_utils.pad_packed_sequence(packed_out)\n outputs = outputs.index_select(1, rev_order)\n hidden_final = hidden_final.index_select(1, rev_order)\n\n if padmask.size(0) > outputs.size(0):\n padmask = padmask.narrow(0, 0, outputs.size(0))\n\n return outputs, hidden_final, padmask, lengths\n\n#################### Misc ####################\ndef create_centered_map(batch_size, grid_size):\n centered_map = Variable(torch.zeros(batch_size, 1, grid_size, grid_size))\n centered_map.data[:,:,grid_size//2, grid_size//2].fill_(1.)\n \n return centered_map\n\ndef conv_size(in_size, kernel_size, stride=1, padding=0, dilation=1):\n out_size = (in_size + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1\n \n return out_size\n\ndef grad_norm(parameters, norm_type=2):\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n norm_type = float(norm_type)\n if norm_type == float('inf'):\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm ** norm_type\n total_norm = total_norm ** (1. 
/ norm_type)\n return total_norm\n\ndef param_norm(parameters, norm_type=2):\n parameters = list(parameters)\n norm_type = float(norm_type)\n if norm_type == float('inf'):\n total_norm = max(p.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.data.norm(norm_type)\n total_norm += param_norm ** norm_type\n total_norm = total_norm ** (1. / norm_type)\n return total_norm\n\ndef linear_schedule(init_val, final_val, progress):\n return init_val + min(1., progress) * (final_val - init_val)\n" }, { "alpha_fraction": 0.6059073209762573, "alphanum_fraction": 0.6159552335739136, "avg_line_length": 49.78082275390625, "blob_id": "2c0c37b562dbcc92dbd2c49e99aaa5745f9baca4", "content_id": "e5b7663caf27f98664558d764c8444fcd4235635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14829, "license_type": "no_license", "max_line_length": 194, "num_lines": 292, "path": "/model.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.utils.rnn as rnn_utils \n\nfrom functions import *\nfrom utils import *\nimport random\n\nUSE_VARLEN_RNN = True\n\nclass Expression(nn.Module):\n def __init__(self, func):\n super(Expression, self).__init__()\n self.func = func\n \n def forward(self, input):\n return self.func(input)\n\nclass Agent(nn.Module):\n def __init__(self, config, vocab_size):\n super(Agent, self).__init__()\n\n self.config = config\n self.alpha = config.alpha\n\n # WordEmbed maintains the embedding matrix which is also shared as the softmax matrix in recognition task\n self.WordEmbed = nn.Embedding(vocab_size, config.word_embed_dim, padding_idx=0)\n\n # FuncEmbed_MLP and SyntEmbed_MLP project word embeddings into functional embeddings and syntactic embeddings respectively\n self.FuncEmbed_MLP = 
nn.Sequential(\n nn.Linear(config.word_embed_dim, config.word_embed_dim//2), nn.Tanh(), \n nn.Linear(config.word_embed_dim//2, config.func_embed_dim), nn.Tanh()\n )\n self.SyntEmbed_MLP = nn.Sequential(\n nn.Linear(config.word_embed_dim, config.word_embed_dim//2), nn.Tanh(), \n nn.Linear(config.word_embed_dim//2, config.synt_embed_dim), nn.Tanh()\n )\n\n # Controller_RNN takes the syntactic embedding as input and encode it\n self.Controller_RNN = nn.GRU(config.synt_embed_dim, config.lang_hidden_dim, bidirectional=True)\n \n # Context_MLP projects the concatenation of syntactic embedding and controller output into context vectors\n self.Context_MLP = nn.Sequential(nn.Linear(config.synt_embed_dim+2*config.lang_hidden_dim, config.lang_hidden_dim), nn.Tanh())\n \n # Booting_MLP projects the final state of the controller into the initial state of the programmer RNN\n self.Booting_MLP = nn.Sequential(nn.Linear(2*config.lang_hidden_dim, config.lang_hidden_dim), nn.Tanh())\n \n # Encoder_MLP projects the controller output into the context key used in computing word attention\n self.Encoder_MLP = nn.Sequential(nn.Linear(config.lang_hidden_dim, config.lang_hidden_dim), nn.Tanh())\n\n # Programmer_RNN maintains the recurrent internal state of the Programmer\n self.Programmer_RNN = nn.GRU(config.lang_hidden_dim, config.lang_hidden_dim)\n\n # Sigma_MLP computes the ratio for mixing the grid attention map across programming steps\n # self.Sigma_MLP = nn.Sequential(nn.Linear(2*config.lang_hidden_dim, 1, bias=False), nn.Sigmoid())\n self.Sigma_MLP = nn.Sequential(nn.Linear(2*config.lang_hidden_dim, 2, bias=False), nn.Softmax())\n\n # Mask_MLP takes functional embeddings as input to produce a mask\n self.Mask_MLP = nn.Sequential(\n nn.Linear(config.func_embed_dim, config.func_embed_dim), nn.Tanh(), \n nn.Linear(config.func_embed_dim, config.word_embed_dim), nn.Sigmoid()\n )\n\n # Question_RNN encodes the question in recognition task\n self.Question_RNN = 
nn.GRU(config.func_embed_dim, config.func_embed_dim)\n\n # Visual_CNN extracts visual information from the raw image input\n self.Visual_CNN = nn.Sequential(\n nn.Conv2d( 3, config.vis_num_filters[0], config.vis_filter_sizes[0], stride=config.vis_strides[0]), nn.ReLU(), \n nn.Conv2d(config.vis_num_filters[0], config.vis_num_filters[1], config.vis_filter_sizes[1], stride=config.vis_strides[1]), nn.ReLU(), \n nn.Conv2d(config.vis_num_filters[1], config.vis_num_filters[2], config.vis_filter_sizes[2], stride=config.vis_strides[2]), nn.ReLU(), \n nn.Conv2d(config.vis_num_filters[2], config.vis_num_filters[3], config.vis_filter_sizes[3], stride=config.vis_strides[3]), nn.ReLU()\n )\n \n # feat_map_spatial as a trainable parameter represents the spatial/directional information\n self.feat_map_spatial = nn.Parameter(torch.Tensor(config.feat_spatial_dim, config.grid_size, config.grid_size))\n\n # Env_CNN transforms visual feature map into an environment map (with only one channel) using 1x1 convolution\n self.Env_CNN = nn.Sequential(nn.Conv2d(in_channels=config.feat_visual_dim, out_channels=1, kernel_size=1), nn.Sigmoid())\n\n # State_Net transforms the concatenation of the environment map and the grid attention map into the state representation of the MDP\n self.State_Net = nn.Sequential(\n nn.Conv2d( 2, config.act_num_filters[0], config.act_filter_size, padding=config.act_padding), nn.ReLU(),\n nn.Conv2d(config.act_num_filters[0], config.act_num_filters[1], config.act_filter_size, padding=config.act_padding), # NOTE: No activition here \n Expression(lambda x: x.view(x.size(0), -1)), # Flatten 4D tensor to 2D tensor\n nn.Linear(config.act_num_filters[1]*config.grid_size*config.grid_size, config.act_hidden_dim), nn.ReLU(),\n nn.Linear(config.act_hidden_dim, config.act_hidden_dim), nn.ReLU()\n )\n\n # Action_Net outputs the policy action probability given the state representation\n self.Action_Net = nn.Sequential(nn.Linear(config.act_hidden_dim, config.num_actions), 
nn.Softmax())\n\n # Value_Net outputs the predicted value given the current state representation\n self.Value_Net = nn.Sequential(\n nn.Linear(config.act_hidden_dim, config.act_hidden_dim//2), nn.ReLU(), \n nn.Linear(config.act_hidden_dim//2, 1)\n )\n\n # ===== Additional working memory\n self.inv_idx = torch.arange(config.grid_size-1,-1,-1).long()\n # self.inv_idx = Variable(torch.arange(config.grid_size-1,-1,-1).long())\n if config.cuda: self.inv_idx = self.inv_idx.cuda()\n\n # Parameter intialization\n self._reset_parameters()\n\n def _reset_parameters(self):\n def normal_init(name, std):\n def init_func(m):\n if name in m.__class__.__name__:\n m.weight.data.normal_(mean=0., std=std)\n if m.bias is not None:\n m.bias.data.fill_(0.)\n return init_func\n\n def gru_init(m):\n if 'GRU' in m.__class__.__name__:\n for l in range(m.num_layers):\n for name in ['ih', 'hh']:\n weight = getattr(m, 'weight_{}_l{}'.format(name, l))\n w_size = weight.data.numel()\n weight.data.normal_(mean=0., std=1.0)\n\n def smart_init(name):\n def init_func(m):\n if name in m.__class__.__name__:\n w_size = m.weight.data.numel()\n m.weight.data.normal_(mean=0., std=np.sqrt(2./w_size))\n if m.bias is not None:\n m.bias.data.fill_(0.)\n return init_func\n\n def zero_init(m):\n if 'Linear' in m.__class__.__name__: \n m.weight.data.fill_(0.)\n if m.bias is not None:\n m.bias.data.fill_(0.)\n\n self.WordEmbed.weight.data.normal_(mean=0., std=1.)\n self.feat_map_spatial.data.zero_()\n\n self.apply(normal_init('Conv2d', std=0.1))\n\n def get_grid_attention(self, feat_map, text):\n config = self.config\n\n ##### Language\n ## Embedding\n word_embed = self.WordEmbed(text)\n func_embed = self.FuncEmbed_MLP(word_embed.view(-1, config.word_embed_dim)).view(word_embed.size(0), -1, config.func_embed_dim)\n synt_embed = self.SyntEmbed_MLP(word_embed.view(-1, config.word_embed_dim)).view(word_embed.size(0), -1, config.synt_embed_dim)\n\n ## Controller\n if USE_VARLEN_RNN:\n encoded_all, encoded_final, 
padmask, lengths = rnn_varlen(self.Controller_RNN, text, synt_embed)\n else:\n encoded_all, encoded_final = self.Controller_RNN(synt_embed)\n context_vec = self.Context_MLP(torch.cat([encoded_all, synt_embed], dim=2).view(-1, config.synt_embed_dim+2*config.lang_hidden_dim)).view(encoded_all.size(0), -1, config.lang_hidden_dim)\n\n encoded_final = bihidden_to_unihidden(encoded_final)\n programmer_boot = self.Booting_MLP(encoded_final.view(-1, 2*config.lang_hidden_dim)).view(encoded_final.size(0), -1, config.lang_hidden_dim)\n\n ## Programmer\n # initial states for the programmer recurrence\n cached_attn = create_centered_map(feat_map.size(0), config.grid_size)\n if config.cuda: cached_attn = cached_attn.cuda()\n\n # obtain the \"unit norm\" context key used for attention; unit norm is used because we want cosine similarity\n context_key = self.Encoder_MLP(context_vec.view(-1, config.lang_hidden_dim)).view(context_vec.size(0), -1, config.lang_hidden_dim)\n\n # core programming recurrence\n grid_attns, cached_attns, heatmaps, seq_attns, masks, sigmas = [], [], [], [], [], []\n hiddens = [programmer_boot]\n program_rnn_step = create_rnn_step(self.Programmer_RNN)\n for step in range(config.program_steps):\n if USE_VARLEN_RNN:\n attn_prob = cosine_attention(context_key, hiddens[-1], padmask)\n else:\n attn_prob = cosine_attention(context_key, hiddens[-1])\n avg_context_vec = temporal_weigthed_avg(context_vec, attn_prob)\n avg_word_embed = temporal_weigthed_avg(word_embed, attn_prob)\n avg_func_embed = temporal_weigthed_avg(func_embed, attn_prob)\n\n # update programmer RNN hidden state using average context vectors\n _, new_hidden = program_rnn_step(avg_context_vec.unsqueeze(0), hiddens[-1])\n hiddens.append(new_hidden)\n\n # compute masked avg_word_embed\n mask = self.Mask_MLP(avg_func_embed)\n masked_word_embed = avg_word_embed * mask\n\n # create a spatially normalized heatmap which will be used as \"filter\" to convolve the grid_attn_map and achieve 2D translation\n 
# - Dimension: masked_word_embed [batch x dim], feat_map [batch x dim x grid x grid]\n heatmap = F.softmax(torch.bmm(masked_word_embed.unsqueeze(1), feat_map.view(feat_map.size(0), feat_map.size(1), -1)).squeeze(1))\n heatmap = heatmap.view(feat_map.size(0), 1, feat_map.size(2), feat_map.size(3))\n\n # rotation\n inv_idx = Variable(self.inv_idx)\n trans_filter = heatmap.index_select(2, inv_idx).index_select(3, inv_idx)\n\n # mapping to the terms in the paper: trans_filter is the `o(a_s'')`, cached_attn is `a_{s-1}'`, grid_attn is the `a_s`\n # a_s = Conv2d ( o(a_s'') , a_{s-1}' )\n grid_attn = nn.functional.conv2d(cached_attn.permute(1, 0, 2, 3), trans_filter, padding=config.grid_size//2, groups=trans_filter.size(0)).permute(1, 0, 2, 3)\n\n # soft update cached_attn\n sigma = self.Sigma_MLP(torch.cat([new_hidden.squeeze(0), avg_context_vec], dim=1))\n\n prev_sigma = sigma.narrow(1, 0, 1)\n curr_sigma = sigma.narrow(1, 1, 1)\n prev_sigma_expand = prev_sigma.unsqueeze(2).unsqueeze(3).expand_as(cached_attn)\n curr_sigma_expand = curr_sigma.unsqueeze(2).unsqueeze(3).expand_as(grid_attn)\n cached_attn = prev_sigma_expand * cached_attn + curr_sigma_expand * grid_attn\n \n # quantities to monitor\n cached_attns.append(cached_attn)\n grid_attns.append(grid_attn)\n seq_attns.append(attn_prob)\n heatmaps.append(heatmap)\n sigmas.append(prev_sigma)\n masks.append(mask)\n\n return grid_attn, func_embed, {'cached_attns':cached_attns, 'grid_attns':grid_attns, 'heatmaps':heatmaps, 'seq_attns':seq_attns, 'masks':masks, 'sigmas':sigmas}\n\n def get_action(self, state):\n action_prob = self.Action_Net(state)\n if self.training:\n if random.random() > self.alpha:\n action = action_prob.multinomial(1)\n else:\n action = Variable(torch.LongTensor(action_prob.size(0), 1).random_(4))\n if self.config.cuda: action = action.cuda()\n else:\n action = action_prob.multinomial(1)\n\n return action, action_prob\n\n def get_value(self, state):\n value = self.Value_Net(state)\n return value\n\n 
def forward(self, image, command=None, question=None, act_only=False, val_only=False):\n assert not (act_only and val_only), 'act_only and val_only cannot be both True'\n \n config = self.config\n\n ##### Perception\n feat_map_visual = self.Visual_CNN(image)\n feat_map = torch.cat([feat_map_visual, self.feat_map_spatial.unsqueeze(0).expand_as(feat_map_visual)], dim=1)\n # feat_map = torch.cat([feat_map_visual, expand_as(self.feat_map_spatial.unsqueeze(0), feat_map_visual)], dim=1)\n\n ##### Action and Value\n if command is not None:\n grid_attn_act, _, nav_info = self.get_grid_attention(feat_map, command)\n env_map = self.Env_CNN(feat_map_visual)\n state = self.State_Net(torch.cat([env_map, grid_attn_act], dim=1))\n\n nav_info['env_map'] = env_map\n\n if act_only:\n action, action_prob = self.get_action(state)\n return action, nav_info\n elif val_only:\n value = self.get_value(state)\n return value, nav_info\n else:\n action, action_prob = self.get_action(state)\n value = self.get_value(state)\n return action, action_prob, value, nav_info\n\n ##### Recognition\n if question is not None:\n rec_grid_attn, rec_func_embed, rec_info = self.get_grid_attention(feat_map, question)\n rec_feat = spatial_weighted_avg(feat_map, rec_grid_attn)\n\n if USE_VARLEN_RNN:\n rec_hidden_all, rec_hidden_final, rec_padmask, rec_lengths = rnn_varlen(self.Question_RNN, question, rec_func_embed)\n avg_rec_func_embed = rec_hidden_all.sum(0, **kwargs).squeeze(0)\n avg_rec_func_embed = avg_rec_func_embed / Variable(rec_lengths.float()).unsqueeze(1).expand_as(avg_rec_func_embed)\n # avg_rec_func_embed = avg_rec_func_embed / expand_as(Variable(rec_lengths.float()).unsqueeze(1), avg_rec_func_embed)\n else:\n avg_rec_func_embed = self.Question_RNN(rec_func_embed)[0].mean(0).squeeze(0)\n\n rec_mask = self.Mask_MLP(avg_rec_func_embed)\n rec_logit = torch.mm(rec_feat * rec_mask, self.WordEmbed.weight.t())\n\n # monitor information\n rec_info['rec_mask'] = rec_mask\n\n return rec_logit, rec_info\n" }, 
{ "alpha_fraction": 0.6579139232635498, "alphanum_fraction": 0.6644784808158875, "avg_line_length": 47.105262756347656, "blob_id": "3e3ccef86beafceb98ed967a69d61d7e9ddbe920", "content_id": "9f6fd5b5e3d7d8966964cbac27ed3d832c81174c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2742, "license_type": "no_license", "max_line_length": 130, "num_lines": 57, "path": "/utils/xworld_utils.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\ndef compute_block_alignment(filter_sizes, strides, block_size):\n \"\"\"\n Given the size of a block (grid) on the original input image,\n the sizes of filters that are used to convolve the image, and the strides\n of the corresponding convolutions, automatically compute the final\n filter size and the final stride so that each pixel on the final conv\n layer represents exactly the image feature within a block in the input image\n \"\"\"\n final_filter_size = block_size\n final_stride = block_size\n for i in range(len(filter_sizes)):\n final_filter_size = (final_filter_size - filter_sizes[i]) // strides[i] + 1\n final_stride = (final_stride - 1) // strides[i] + 1\n assert(final_filter_size > 0 and final_stride > 0)\n return final_filter_size, final_stride\n\ndef xwd_random_step(env):\n return env.take_actions({'action':np.random.randint(0, 4), 'pred_sentence':''})\n\ndef xwd_get_state(config, env, vocab, command):\n # raw state\n env_state = env.get_state()\n\n task = env_state['task']\n\n # ===== Empty frame without seeing a navigation command for the current episode =====\n # - NOTE: This is weird because we have to take random steps in oder to change \n # the current state. So, one has to define how to take a random step \n # for each task. 
Ideally, the robot should receive either a command or \n # a question in the very beginning of an episode.\n if not task and command is None:\n return None, None, None, None\n\n # ===== When the program reaches here, it means the robot has a task at this frame =====\n # Case 1: frame with a recognition task (and possibly also a continuing navigation task)\n if 'XWorldRec' in task:\n tokens = env_state['sentence'].lower().split()\n question = vocab.convert_to_idx(tokens[:-1], unk='#oov#').unsqueeze(1) # [seqlen x 1]\n answer = vocab.convert_to_idx(tokens[-1:], unk='#oov#')\n # Case 2: first frame of a navigation task\n elif 'XWorldNav' in task:\n assert command is None or env_state['sentence'] == 'Well done .', 'Each episode should only have a single navigation task'\n tokens = env_state['sentence'].lower().split()\n command = vocab.convert_to_idx(tokens, unk='#oov#').unsqueeze(1) # [seqlen x 1]\n question, answer = None, None\n # Case 3: continuing frame of a navigation task\n else:\n assert command is not None, 'Should not reach here with command being {}'.format(command)\n question, answer = None, None\n \n image = torch.Tensor(env_state['screen']).view(1, 3, config.img_size, config.img_size)\n\n return image, command, question, answer\n" }, { "alpha_fraction": 0.7223214507102966, "alphanum_fraction": 0.7571428418159485, "avg_line_length": 85.15384674072266, "blob_id": "04e9161d0fb7637a58d15e880ef52e08e097e170", "content_id": "33c8b689fff13378adf23da4cf55c1781cb41c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 314, "num_lines": 13, "path": "/README.md", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "# pytorch_xworld\nThis is a pytorch implementation of the xworld experiment in [*A Deep Compositional Framework for Human-like Language Acquisition in Virtual Environment*](https://arxiv.org/abs/1703.09831), Haonan Yu, 
Haichao Zhang, Wei Xu, arXiv 1703.09831, 2017.\n\nTo run the code:\n* build the XWorld environment from https://github.com/PaddlePaddle/XWorld \n* add the directory holding the \".so\" file of the environment to the PYTHON_PATH\n* for training, run `python trainer.py --cuda`\n* for evaluation, run `python evaluate.py --cuda` \n\nNotes:\n* It seems the XWorld environment only supports python 2.7 now. Due to that, one can only use python 2.7 to run the code. \n* The code is written and tested under pytorch 0.1.12. Due to a lot of changes in the new version (2.0.0), I'm not sure whether the code runs perfectly in the newest version. So, a safe choice is to first try on the older pytorch version. The installation goes by `conda install pytorch=0.1.12 cuda80 -c soumith`. \n* One may encounter a torchvision issue with the key word pad_value. To resolve that, do `pip install git+https://github.com/pytorch/vision.git`.\n" }, { "alpha_fraction": 0.5462962985038757, "alphanum_fraction": 0.5891203880310059, "avg_line_length": 24.791044235229492, "blob_id": "7c8e7a5cc93d499540cef81e9f7e8fca01bf92d3", "content_id": "3cc027ae3fe567bba2c1c49a02725a3d7f43f598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1728, "license_type": "no_license", "max_line_length": 92, "num_lines": 67, "path": "/config.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import utils\n\nclass xworld_config:\n ##### Environment parameters\n env_name = 'xworld'\n\n vocab_dir = './data'\n conf_path = 'data/navigation.json'\n eval_conf_path = 'data/navigation.json'\n\n curriculum = 10000\n need_mem_cmd = 0\n\n grid_size = 13\n pixel_per_grid = 12\n img_size = pixel_per_grid * grid_size\n\n num_actions = 4\n\n ##### Language Module\n word_embed_dim = 1024\n synt_embed_dim = 128\n func_embed_dim = 128\n lang_hidden_dim = 128\n\n program_steps = 3\n\n ##### Action Module\n act_hidden_dim = 512\n act_filter_size = 3\n 
act_num_filters = [num_actions * 16, num_actions]\n act_padding = act_filter_size // 2\n\n ##### Perception Module\n feat_visual_dim = word_embed_dim // 2\n feat_spatial_dim = word_embed_dim - feat_visual_dim\n\n vis_num_filters = [64, 64, feat_visual_dim, feat_visual_dim]\n vis_filter_sizes = [3, 2, 2]\n vis_strides = [3, 2, 2]\n ffs, fs = utils.compute_block_alignment(vis_filter_sizes, vis_strides, pixel_per_grid)\n vis_filter_sizes.append(ffs)\n vis_strides.append(fs)\n\n ##### Reinforcement learning\n explore_frame = 500000 # number of frames using epsilon greedy exploration\n alpha = 1.0 # epsilon greedy\n\n replay_size = 10000 # max size of replay memory\n init_size = 1000 # number of memory entries from which the training starts\n prioritize = False\n beta0 = 0.5\n\n gamma = 0.99 # discount factor\n\n ##### Optimization\n batch_size = 16\n train_interval = 4\n\n algo = 'rmsprop'\n lr = 1e-5\n mom = 0.9\n w_decay = 0.\n\n ##### Monitoring\n monitor_gnorm = False\n monitor_mask = False\n" }, { "alpha_fraction": 0.5693827867507935, "alphanum_fraction": 0.571198046207428, "avg_line_length": 29.79503059387207, "blob_id": "e7373dc10dc0427d25b9f23398f9589cab04b44b", "content_id": "bf278ddf9aa1007b7e5592a04db8b9673ea51a9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4958, "license_type": "no_license", "max_line_length": 125, "num_lines": 161, "path": "/utils/exp_utils.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import os, sys\nimport shutil\nfrom glob import glob\nfrom collections import OrderedDict, Sequence\n\nimport torch\nimport torch.multiprocessing as mp\nfrom torch.autograd import Variable\n\nimport numpy as np\n\nfrom datetime import datetime\nimport time\nfrom vocab import Vocab\n\nif sys.version_info >= (3,0):\n raw_input = input\n\nclass Monitor(object):\n def __init__(self, count=True, default_val=0., track_time=False):\n self.count = count\n 
self.default_val = default_val\n self.track_time = track_time\n \n self.reset()\n\n def update(self, key, val):\n if key not in self.monitor:\n self.monitor[key] = self.default_val\n self.monitor[key] += val\n\n if self.count:\n if key not in self.counter:\n self.counter[key] = 0.\n self.counter[key] += 1.\n\n def update_dict(self, kv_dict):\n for k, v in kv_dict.items():\n self.update(k, v)\n\n def reset(self):\n self.monitor = OrderedDict()\n if self.count:\n self.counter = OrderedDict()\n if self.track_time:\n self.reset_time = time.time()\n\n def disp(self, reset=False):\n disp_str = ''\n if self.track_time:\n disp_str += ' time {:.2f}'.format(time.time() - self.reset_time)\n for key in sorted(self.monitor.keys()):\n if self.count:\n disp_str += ' | {}: {:.4f}'.format(key, self.monitor[key] / self.counter[key])\n else:\n disp_str += ' | {}: {}'.format(key, self.monitor[key])\n\n if reset:\n self.reset()\n\n return disp_str\n\ndef recursive_map(seq, func):\n for item in seq:\n if isinstance(item, Sequence):\n yield type(item)(recursive_map(item, func))\n else:\n yield func(item)\n\ndef recursive_to_numpy(seq):\n for item in seq:\n if isinstance(item, Sequence):\n yield type(item)(recursive_to_numpy(item))\n else:\n if torch.is_tensor(item):\n yield item.numpy()\n else:\n yield item\n\ndef recursive_from_numpy(seq):\n for item in seq:\n if isinstance(item, Sequence):\n yield type(item)(recursive_from_numpy(item))\n else:\n if isinstance(item, np.ndarray):\n yield torch.from_numpy(item)\n else:\n yield item\n\ndef log_experiment_config(config):\n disp_str = ''\n for attr in sorted(dir(config), key=lambda x: len(x)):\n if not attr.startswith('__'):\n disp_str += '{} : {}\\n'.format(attr, getattr(config, attr))\n print(disp_str)\n\ndef create_log_dir(root_dir, sub_dirs=['checkpoint']):\n if os.path.exists(root_dir):\n while True:\n command = raw_input('Directory {} already exists. 
Do you want to delete it and proceed [Y|N]: '.format(root_dir))\n if command in ['y', 'Y']:\n shutil.rmtree(root_dir)\n break\n elif command in ['n', 'N']:\n sys.exit()\n else:\n print('Unrecognized value: {}. Please re-enter the command.'.format(command))\n\n os.makedirs(root_dir)\n for sub_dir in sub_dirs:\n os.makedirs(\"%s/%s\" % (root_dir, sub_dir))\n\ndef load_vocab(vocab_dir, special_syms=None):\n if not os.path.exists(os.path.join(vocab_dir, 'vocab.pt')):\n vocab = Vocab(special_syms)\n for idx, line in enumerate(open(os.path.join(vocab_dir, 'dict.txt'), 'r')):\n vocab.add(line.strip())\n torch.save(vocab, os.path.join(vocab_dir, 'vocab.pt'))\n else:\n vocab = torch.load(os.path.join(vocab_dir, 'vocab.pt'))\n print('Vocab size: {}'.format(vocab.size()))\n\n return vocab\n\ndef save_checkpoint(save_path, model, optimizer, vocab, description=None):\n model_state_dict = model.state_dict()\n optim_state_dict = optimizer.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'optimizer': optim_state_dict,\n 'vocab': vocab,\n 'description': description\n }\n torch.save(checkpoint, save_path)\n\ndef latest_checkpoint(save_dir, prefix='chk.ep'):\n \"\"\"\n Get the path for the latest checkpoint given the save dir and checkpoint prefix\n \"\"\"\n fns = glob(os.path.join(save_dir, prefix) + '*')\n if len(fns) == 0:\n raise ValueError('No checkpoints with prefix \"{}\" find in the dir \"{}\"'.format(prefix, save_dir))\n latest_fn = max(fns, key=lambda fn: int(fn.split(prefix)[-1]))\n\n return latest_fn\n\ndef create_variable_func(cuda):\n \"\"\"\n Return a modified function that turns tensor to variable which deals with:\n - checking None type\n - transfering to GPU\n \"\"\"\n def variable(tensor, **kwargs):\n if tensor is None:\n return None\n if cuda:\n return Variable(tensor.cuda(), **kwargs)\n else:\n return Variable(tensor, **kwargs)\n\n return variable\n" }, { "alpha_fraction": 0.6331395506858826, "alphanum_fraction": 0.650581419467926, "avg_line_length": 
31.471698760986328, "blob_id": "071ca262969f4025f26db709e1bc488a944f66a3", "content_id": "8cff638aeec00e5607c616078e44b2afad0bf28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 101, "num_lines": 53, "path": "/utils/vis_utils.py", "repo_name": "zihangdai/pytorch_xworld", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision.utils as vutils\nimport torchvision.transforms as transforms\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\ndef vis_seq_attn(ax, seq_attn, xticks, yticks, **kwargs):\n ax.set_aspect('equal')\n ax.pcolor(seq_attn, cmap=plt.cm.Blues, **kwargs)\n\n # ax.set_xticks(np.arange(len(xticks))+0.5)\n # ax.set_xticklabels(xticks, fontsize=12, rotation='30')\n # ax.set_yticks(np.arange(len(yticks))+0.5)\n # ax.set_yticklabels(yticks, fontsize=12)\n\n ax.set_xticks(np.arange(len(xticks))+0.5, minor=True)\n ax.set_xticklabels(xticks, fontsize=12, minor=True, rotation='60')\n ax.set_yticks(np.arange(len(yticks))+0.5, minor=True)\n ax.set_yticklabels(yticks, fontsize=12, minor=True)\n\n ax.set_xticks(np.arange(len(xticks)), minor=False)\n ax.set_xticklabels([], minor=False)\n ax.set_yticks(np.arange(len(yticks)), minor=False)\n ax.set_yticklabels([], minor=False)\n\n ax.grid(True, which='major', linestyle='-', color='white')\n\ndef vis_scale_image(image, scale):\n if isinstance(scale, tuple):\n s_w, s_h = scale\n elif isinstance(scale, int):\n s_w, s_h = scale, scale\n else:\n raise ValueError('scale should be either an int or a tuple, but got {}'.format(scale))\n\n B, C, W, H = image.size()\n scaled_image = image.view(B, C, W, 1, H, 1).repeat(1, 1, 1, s_w, 1, s_h).view(B, C, W*s_w, H*s_h)\n\n return scaled_image\n\nopencv_inv_idx = torch.arange(2,-1,-1).long()\ndef opencv_to_rgb(image):\n return image.index_select(1, opencv_inv_idx)\n\ndef clear_axes(axes):\n for ax in axes:\n 
if isinstance(ax, np.ndarray):\n clear_axes(ax)\n else:\n ax.clear()" } ]
13
KaiserKyle/Kayfabermetrics
https://github.com/KaiserKyle/Kayfabermetrics
01751816c52a81fdcd23b7cdeaa67f1924aa0f5c
beeae1283de6f337c3c19216c569f1d97c3f1029
4c231e7aa4cad76f54f290d0a95b4e0357eb1d5f
refs/heads/master
2021-01-10T15:57:05.239023
2019-03-24T00:02:39
2019-03-24T00:02:39
45,817,520
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5560134053230286, "alphanum_fraction": 0.5869173407554626, "avg_line_length": 29.57480239868164, "blob_id": "7b47ad85afe24c328a57569fe73d2ddaa268e40d", "content_id": "b09d8e27397b2fe40a4df105e1125ef327bd983f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3883, "license_type": "no_license", "max_line_length": 225, "num_lines": 127, "path": "/php/wrestlerorigins.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n <head>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/wp-content/themes/justwrite/style.css\">\n </head>\n <style>\n#chart {\n height: 1000px;\n}\n\n.node rect {\n cursor: move;\n fill-opacity: .9;\n shape-rendering: crispEdges;\n}\n\n.node text {\n pointer-events: none;\n text-shadow: 0 1px 0 #fff;\n}\n\n.link {\n fill: none;\n stroke: #000;\n stroke-opacity: .2;\n}\n\n.link:hover {\n stroke-opacity: .5;\n}\n\n</style>\n\n <body>\n <div id=\"chart\"></div>\n<script src=\"http://d3js.org/d3.v2.min.js?2.9.1\"></script>\n<script src=\"sankey.js\"></script>\n<script>\n\nvar margin = {top: 10, right: 10, bottom: 10, left: 10},\n width = 1000 - margin.left - margin.right,\n height = 1000 - margin.top - margin.bottom;\n\nvar formatNumber = d3.format(\",.0f\"),\n format = function(d) { return formatNumber(d) + \" Wrestlers\"; },\n color = d3.scale.category20();\n\nvar svg = d3.select(\"#chart\").append(\"svg\")\n .attr(\"width\", width + margin.left + margin.right)\n .attr(\"height\", height + margin.top + margin.bottom)\n .append(\"g\")\n .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");\n\nvar sankey = d3.sankey()\n .nodeWidth(15)\n .nodePadding(10)\n .size([width, height]);\n\nvar path = sankey.link();\n\nd3.json(\"sankey.php\", function(energy) {\n\n sankey\n .nodes(energy[0].nodes)\n .links(energy[1].links)\n .layout(32);\n\n var link = svg.append(\"g\").selectAll(\".link\")\n 
.data(energy[1].links)\n .enter().append(\"path\")\n .attr(\"class\", \"link\")\n .attr(\"d\", path)\n .style(\"stroke-width\", function(d) { return Math.max(1, d.dy); })\n .sort(function(a, b) { return b.dy - a.dy; });\n\n link.append(\"title\")\n .text(function(d) { return d.source.name + \" to \" + d.target.name + \": \" + d.value + \" wrestler(s)\"; });\n\n var node = svg.append(\"g\").selectAll(\".node\")\n .data(energy[0].nodes)\n .enter().append(\"g\")\n .attr(\"class\", \"node\")\n .attr(\"transform\", function(d) { return \"translate(\" + d.x + \",\" + d.y + \")\"; })\n .call(d3.behavior.drag()\n .origin(function(d) { return d; })\n .on(\"dragstart\", function() { this.parentNode.appendChild(this); })\n .on(\"drag\", dragmove))\n .on(\"mouseover\", mouseover);\n\n node.append(\"rect\")\n .attr(\"height\", function(d) { return d.dy; })\n .attr(\"width\", sankey.nodeWidth())\n .style(\"fill\", function(d) { return d.color = color(d.name.replace(/ .*/, \"\")); })\n .style(\"stroke\", function(d) { return d3.rgb(d.color).darker(2); })\n .append(\"title\")\n .text(function(d) { return d.name + \"\\n\" + format(d.value); });\n\n node.append(\"text\")\n .attr(\"x\", -6)\n .attr(\"y\", function(d) { return d.dy / 2; })\n .attr(\"dy\", \".35em\")\n .attr(\"text-anchor\", \"end\")\n .attr(\"transform\", null)\n .text(function(d) { return d.name; })\n .filter(function(d) { return d.x < width / 2; })\n .attr(\"x\", 6 + sankey.nodeWidth())\n .attr(\"text-anchor\", \"start\");\n\n function dragmove(d) {\n d3.select(this).attr(\"transform\", \"translate(\" + d.x + \",\" + (d.y = Math.max(0, Math.min(height - d.dy, d3.event.y))) + \")\");\n sankey.relayout();\n link.attr(\"d\", path);\n }\n \n function mouseover(d) {\n var infoDiv = d3.select(\"#info\");\n var infoImg = d3.select(\"#infoimg\");\n infoDiv[0][0].style.textAlign = \"center\";\n infoDiv[0][0].innerHTML = \"<p style=\\\"font-size:24px\\\"><b>\" + d.fullname + \"</b></p>\";\n infoDiv[0][0].innerHTML += \"<img 
id=\\\"infoimg\\\" style=\\\"float:right;height:225px;width:250px;padding-right:25px\\\" src=\\\"http://media.graytvinc.com/images/600*600/wwe_world_wrestling_entertainment_logo_square.jpg\\\"/>\";\n }\n});\n\n</script>\n<div id=\"info\" style=\"width:1000px;height:300px;-webkit-border-radius: 20px;-moz-border-radius: 20px;border-radius: 20px;border:2px solid #000000;background:rgba(42,136,176,0.9);\"><img id=\"infoimg\" style=\"float:right\"/></div>\n</body>\n</html> " }, { "alpha_fraction": 0.5150039792060852, "alphanum_fraction": 0.5330755114555359, "avg_line_length": 30.5726318359375, "blob_id": "a102eeafbb15d29132cb8e2b655d146496863bb8", "content_id": "ba1e89f1799a7a8cc60ffd3e21363981a501c322", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 14996, "license_type": "no_license", "max_line_length": 121, "num_lines": 475, "path": "/php/rawmatches.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<meta charset=\"utf-8\">\n<title>Raw Matches By Wrestler</title>\n<style>\n\n#circle circle {\n fill: none;\n pointer-events: all;\n}\n\n.group path {\n fill-opacity: .5;\n}\n\n#tooltip {\n position: absolute;\n width: auto;\n height: auto;\n padding: 10px;\n background-color: white;\n -webkit-border-radius: 10px;\n -moz-border-radius: 10px;\n border-radius: 10px;\n -webkit-box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4);\n -moz-box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4);\n box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4);\n pointer-events: none;\n}\n\n#tooltip.hidden {\n display: none;\n}\n\n#tooltip p {\n margin: 0;\n font-family: sans-serif;\n font-size: 16px;\n line-height: 20px;\n}\n\n#winlosstooltip {\n position: absolute;\n width: auto;\n height: auto;\n padding: 10px;\n background-color: white;\n -webkit-border-radius: 10px;\n -moz-border-radius: 10px;\n border-radius: 10px;\n -webkit-box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4);\n -moz-box-shadow: 4px 4px 10px 
rgba(0, 0, 0, 0.4);\n box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4);\n pointer-events: none;\n}\n\n#winlosstooltip.hidden {\n display: none;\n}\n\n#winlosstooltip p {\n margin: 0;\n font-family: sans-serif;\n font-size: 16px;\n line-height: 20px;\n}\n\npath.chord {\n stroke: #000;\n stroke-width: .25px;\n}\n\n#circle:hover path.fade {\n opacity: 0;\n}\n\n</style>\n<script src=\"http://d3js.org/d3.v3.min.js\"></script>\n\n<div id=\"checkboxes\" style=\"text-align:center\">\n <label><input type=\"checkbox\" title=\"Raw\" id=\"raw\" onchange=\"checkBoxClicked()\" checked/>Raw</label>\n <label><input type=\"checkbox\" title=\"Smackdown\" id=\"smackdown\" onchange=\"checkBoxClicked()\"/>Smackdown</label>\n <label><input type=\"checkbox\" title=\"PPV\" id=\"ppv\" onchange=\"checkBoxClicked()\"/>PPV</label>\n</div>\n<div id=\"graphContainer\" style=\"text-align:center\">\n <div id=\"tooltip\" class=\"hidden\">\n <p><b><span id=\"name\">Important Label Heading</span></b></p>\n <p><span id=\"value\">100</span></p>\n </div>\n <div id=\"winlosstooltip\" class=\"hidden\">\n <p><span id=\"source\">Important Label Heading</span></p>\n <p><span id=\"target\">100</span></p>\n </div>\n</div>\n<p style=\"text-align:center\">Built with <a href=\"http://d3js.org/\">d3.js</a></p>\n<p style=\"text-align:center\">Data from <a href=\"http://www.profightdb.com/\">profightdb.com</a></p>\n<script>\n\nvar width = 1200,\n height = 1280,\n outerRadius = Math.min(width, height) / 2 - 100,\n innerRadius = outerRadius - 24;\n \nvar color = d3.scale.category20();\n\nvar formatPercent = d3.format(\".1%\");\n\nvar arc = d3.svg.arc()\n .innerRadius(innerRadius)\n .outerRadius(outerRadius);\n\nfunction getDefaultLayout() {\n return d3.layout.chord()\n .padding(0.02)\n .sortSubgroups(d3.descending)\n .sortChords(d3.ascending);\n} \n \n//var layout = d3.layout.chord()\n// .padding(.02)\n// .sortSubgroups(d3.descending)\n// .sortChords(d3.ascending);\n\nvar path = d3.svg.chord()\n 
.radius(innerRadius);\n\nvar svg = d3.select(\"#graphContainer\").append(\"svg\")\n .attr(\"width\", width)\n .attr(\"height\", height)\n .append(\"g\")\n .attr(\"id\", \"circle\")\n .attr(\"transform\", \"translate(\" + width / 2 + \",\" + height / 2 + \")\");\n\nvar last_layout;\nvar current_data;\n \nsvg.append(\"circle\")\n .attr(\"r\", outerRadius);\n \nrenderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=Raw\");\n\n// data is as follows\n// [0] is the wrestlers list with number of matches\n// [1] is the wins matrix\n// [2] is the match matrix\nfunction renderData(url) {\nd3.json(url, function(data) {\n \n current_data = data;\n\n layout = getDefaultLayout();\n // Compute the chord layout.\n layout.matrix(data[2]);\n \n var oldGroup = svg.selectAll(\".group\").data(layout.groups(), function (d) {\n return d.index; \n //use a key function in case the \n //groups are sorted differently between updates\n });\n \n oldGroup.exit()\n .transition()\n .duration(1000)\n .attr(\"opacity\", 0)\n .remove(); //remove after transitions are complete\n\n // Add a group per neighborhood.\n var group = oldGroup\n .enter().append(\"g\")\n .attr(\"class\", \"group\");\n\n //group.append(\"title\");\n \n // Add a mouseover title.\n //oldGroup.select(\"title\").text(function(d, i) {\n // return data[0][i].name + \": \" + data[0][i].nummatches + \" matches\";\n //});\n\n // Add the group arc.\n group.append(\"path\")\n .attr(\"id\", function(d, i) { return \"group\" + i; })\n .style(\"fill\", function(d, i) { return color(data[0][i].name); })\n .on(\"mouseover\", function(d, i) {\n \n\t\t\t\t\t//Get this bar's x/y values, then augment for the tooltip\n var bbox = this.getBBox();\n var matrix = this.getScreenCTM();\n var pt = document.getElementsByTagName(\"svg\")[0].createSVGPoint();\n\t\t\t\t pt.x = bbox.x;\n pt.y = bbox.y;\n var origin = pt.matrixTransform(matrix);\n origin.x = origin.x + document.documentElement.scrollLeft || document.body.scrollLeft;\n origin.y = 
origin.y + document.documentElement.scrollTop || document.body.scrollTop;\n\n\t\t\t\t\t//Update the tooltip position and value\n\t\t\t\t\td3.select(\"#tooltip\")\n\t\t\t\t\t\t.style(\"left\", origin.x + \"px\")\n\t\t\t\t\t\t.style(\"top\", origin.y + \"px\")\t\t\t\t\t\t\n\t\t\t\t\t\t.select(\"#value\")\n\t\t\t\t\t\t.text(current_data[0][i].nummatches + \" matches\");\n d3.select(\"#tooltip\")\n .select(\"#name\")\n .text(current_data[0][i].name);\n\t\t\t \n\t\t\t\t\t//Show the tooltip\n\t\t\t\t\td3.select(\"#tooltip\").classed(\"hidden\", false);\n\n\t\t\t })\n\t\t\t .on(\"mouseout\", function() {\n\t\t\t \n\t\t\t\t\t//Hide the tooltip\n\t\t\t\t\td3.select(\"#tooltip\").classed(\"hidden\", true);\n\t\t\t\t\t\n\t\t\t });\n \n //update the paths to match the layout\n oldGroup.select(\"path\") \n .transition()\n .duration(1000)\n //.attr(\"opacity\", 0.5) //optional, just to observe the transition\n .attrTween(\"d\", arcTween( last_layout ))\n // .transition().duration(100).attr(\"opacity\", 1) //reset opacity\n ;\n \n group.append(\"svg:text\")\n .attr(\"xlink:href\", function (d, i) {\n return \"#group\" + data[0][i].name;\n })\n .attr(\"dy\", \".35em\")\n .text(function(d, i) { return data[0][i].name; });\n \n //position group labels to match layout\n oldGroup.select(\"text\")\n .transition()\n .duration(1000)\n .text(function(d, i) { return data[0][i].name; })\n .attr(\"transform\", function(d) {\n d.angle = (d.startAngle + d.endAngle) / 2;\n //store the midpoint angle in the data object\n \n return \"rotate(\" + (d.angle * 180 / Math.PI - 90) + \")\" +\n \" translate(\" + (innerRadius + 26) + \")\" + \n (d.angle > Math.PI ? \" rotate(180)\" : \" rotate(0)\"); \n //include the rotate zero so that transforms can be interpolated\n })\n .attr(\"text-anchor\", function (d) {\n return d.angle > Math.PI ? 
\"end\" : \"begin\";\n });\n\n\n // Add the chords.\n var chord = svg.selectAll(\"path.chord\")\n .data(layout.chords(), chordKey);\n \n var newChords = chord\n .enter().append(\"path\")\n .attr(\"class\", \"chord\")\n .on(\"mouseover\", function(d, i) {\n if (this.className.baseVal == \"chord\")\n { \n var origin = d3.mouse(this);\n \n //Update the tooltip position and value\n d3.select(\"#winlosstooltip\")\n .style(\"left\", d3.event.x + \"px\")\n .style(\"top\", d3.event.y + \"px\")\t\t\t\t\t\t\n .select(\"#target\")\n .text(current_data[0][d.target.index].name + \": \" + data[1][d.target.index][d.source.index] + \" wins\");\n d3.select(\"#winlosstooltip\")\n .select(\"#source\")\n .text(current_data[0][d.source.index].name + \": \" + data[1][d.source.index][d.target.index] + \" wins\");\n \n //Show the tooltip\n d3.select(\"#winlosstooltip\").classed(\"hidden\", false);\n }\n })\n .on(\"mouseout\", function() {\n \n //Hide the tooltip\n d3.select(\"#winlosstooltip\").classed(\"hidden\", true);\n \n });; \n//.attr(\"d\", path);\n \n newChords.append(\"title\");\n\n // Add an elaborate mouseover title for each chord.\n chord.select(\"title\").text(function(d) {\n return data[0][d.source.index].name\n + \": \" + data[1][d.source.index][d.target.index]\n + \" wins, \"\n + \"\\n\" + data[0][d.target.index].name \n + \": \" + data[1][d.target.index][d.source.index]\n + \" wins\";\n });\n \n //handle exiting paths:\n chord.exit().transition()\n .duration(1500)\n .attr(\"opacity\", 0)\n .remove();\n \n //update the path shape\n chord.transition()\n .duration(1500)\n //.attr(\"opacity\", 0.5) //optional, just to observe the transition\n .style(\"fill\", function(d) { return color(data[0][d.source.index].name); })\n .attrTween(\"d\", chordTween(last_layout))\n //.transition().duration(100).attr(\"opacity\", 1) //reset opacity\n ;\n \n //oldGroup.on(\"mouseover\", mouseover);\n oldGroup.on(\"mouseover\", function(d) {\n chord.classed(\"fade\", function (p) {\n //returns 
true if *neither* the source or target of the chord\n //matches the group that has been moused-over\n return ((p.source.index != d.index) && (p.target.index != d.index));\n });\n });\n \n last_layout = layout; //save for next update\n\n function mouseover(d, i) {\n chord.classed(\"fade\", function(p) {\n return p.source.index != i\n && p.target.index != i;\n });\n\n }\n });\n};\n\nfunction arcTween(oldLayout) {\n var oldGroups = {};\n if (oldLayout) {\n oldLayout.groups().forEach( function(groupData) {\n oldGroups[ groupData.index ] = groupData;\n });\n }\n \n return function (d, i) {\n var tween;\n var old = oldGroups[d.index];\n if (old) { //there's a matching old group\n tween = d3.interpolate(old, d);\n }\n else {\n //create a zero-width arc object\n var emptyArc = {startAngle:d.startAngle,\n endAngle:d.startAngle};\n tween = d3.interpolate(emptyArc, d);\n }\n \n return function (t) {\n return arc( tween(t) );\n };\n };\n }\n \nfunction chordKey(data) {\n return (data.source.index < data.target.index) ?\n data.source.index + \"-\" + data.target.index:\n data.target.index + \"-\" + data.source.index;\n}\n\nfunction chordTween(oldLayout) {\n //this function will be called once per update cycle\n \n //Create a key:value version of the old layout's chords array\n //so we can easily find the matching chord \n //(which may not have a matching index)\n \n var oldChords = {};\n \n if (oldLayout) {\n oldLayout.chords().forEach( function(chordData) {\n oldChords[ chordKey(chordData) ] = chordData;\n });\n }\n \n return function (d, i) {\n //this function will be called for each active chord\n \n var tween;\n var old = oldChords[ chordKey(d) ];\n if (old) {\n //old is not undefined, i.e.\n //there is a matching old chord value\n \n //check whether source and target have been switched:\n if (d.source.index != old.source.index ){\n //swap source and target to match the new data\n old = {\n source: old.target,\n target: old.source\n };\n }\n \n tween = d3.interpolate(old, 
d);\n }\n else {\n //create a zero-width chord object\n if (oldLayout) {\n var oldGroups = oldLayout.groups().filter(function(group) {\n return ( (group.index == d.source.index) ||\n (group.index == d.target.index) )\n });\n old = {source:oldGroups[0],\n target:oldGroups[1] || oldGroups[0] };\n //the OR in target is in case source and target are equal\n //in the data, in which case only one group will pass the\n //filter function\n \n if (old.source) {\n if (d.source.index != old.source.index ){\n //swap source and target to match the new data\n old = {\n source: old.target,\n target: old.source\n };\n }\n }\n else old = d;\n }\n else old = d;\n \n var emptyChord = {\n source: { startAngle: old.source.startAngle,\n endAngle: old.source.startAngle},\n target: { startAngle: old.target.startAngle,\n endAngle: old.target.startAngle}\n };\n tween = d3.interpolate( emptyChord, d );\n }\n\n return function (t) {\n //this function calculates the intermediary shapes\n return path(tween(t));\n };\n };\n}\n\nfunction checkBoxClicked() {\n var rawCheck = document.getElementById(\"raw\");\n var sdCheck = document.getElementById(\"smackdown\");\n var ppvCheck = document.getElementById(\"ppv\");\n \n \n if (ppvCheck.checked && sdCheck.checked && rawCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=All\");\n }\n else if (rawCheck.checked && sdCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=RawSmackdown\");\n }\n else if (ppvCheck.checked && rawCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=PPVRaw\");\n }\n else if (ppvCheck.checked && sdCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=PPVSmackdown\");\n }\n else if (rawCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=Raw\");\n }\n else if (sdCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=Smackdown\");\n 
}\n else if (ppvCheck.checked) {\n renderData(\"/wp-content/uploads/2015/05/rawmatchesphp.php?showname=PPV\");\n }\n else {\n alert(\"none\");\n }\n}\n\n</script>" }, { "alpha_fraction": 0.6584406495094299, "alphanum_fraction": 0.67538982629776, "avg_line_length": 33.462615966796875, "blob_id": "716af6326a10a518d317ee0e2bbaffb857d9f50b", "content_id": "a51cec5b6803612876f34aedace4fb1f77ec7357", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7375, "license_type": "no_license", "max_line_length": 214, "num_lines": 214, "path": "/python/TopLists.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "import pymysql as mdb\nimport math\nimport statistics\nimport datetime\nimport time\nimport collections\nimport csv\nimport json\n\ndef getWrestlerId(name):\n for wrestler in wrestlers:\n names = wrestler[1].split('; ');\n for wrestlername in names: \n if name == wrestlername:\n return wrestler[0];\n \n return 0;\n\ndef printWrestle(rating):\n name = \"\";\n for wrestler in wrestlers:\n if wrestler[0] == rating[1]:\n print(wrestler[1], end=\" \");\n name = wrestler[1];\n print(time.strftime('%Y-%m-%d', time.localtime(rating[2]))+ \" (\" + str(rating[1]) +\"): \" + str(round(rating[3])));\n return name;\n\ndef updateRankingDb(cur, wrestlerid, epochtime, rating):\n queryString = \"UPDATE ELO_current SET epochtime=\" + str(epochtime) + \", rating=\" + str(rating) + \" WHERE wrestlerid=\" + str(wrestlerid);\n cur.execute(queryString);\n \ndef updateTopDb(cur, toplist, rank, wrestlerid, epochtime, rating, name):\n queryString = \"UPDATE toplists SET wrestlerid=\" + str(wrestlerid) + \", epochtime=\" + str(epochtime) + \", rating=\" + str(rating) + \", name=\\\"\" + name + \"\\\" WHERE toplist='\" + toplist + \"' AND rank=\" + str(rank);\n cur.execute(queryString);\n \ndef insertTopDb(cur, toplist, rank, wrestlerid, epochtime, rating, name):\n queryString = \"INSERT INTO toplists VALUE ('\" + 
toplist + \"',\" + str(rank) + \",\" + str(wrestlerid) + \",\" + str(epochtime) + \",\" + str(rating) + \",\\\"\" + name + \"\\\");\";\n print(queryString);\n cur.execute(queryString);\n\nthirtydays = datetime.timedelta(days=30);\nthirtydaysbeforetoday = datetime.datetime.now() - thirtydays;\nfinalthirty = time.mktime(thirtydaysbeforetoday.timetuple());\nninteydaysbefore = thirtydaysbeforetoday - thirtydays - thirtydays;\nfinalninety = time.mktime(ninteydaysbefore.timetuple());\n\nconfigDataFile=open('config.json')\nconfigData = json.load(configDataFile)\nconfigDataFile.close()\n\nprint (\"Connecting\")\ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\nprint(\"Connected\");\ncur = con.cursor();\n\nqueryString = \"SELECT ID, Name FROM wrestlers_temp WHERE IsPrimary=1 GROUP BY ID\";\ncur.execute(queryString)\ncon.commit()\nwrestlers = cur.fetchall();\n\nqueryString = \"SELECT * FROM (SELECT * FROM ELO_ratings ORDER BY wrestlerid, epochtime DESC) x GROUP BY wrestlerid ORDER BY rating DESC\";\ncur.execute(queryString);\ncon.commit();\ncurrentRatings = cur.fetchall();\n\nqueryString = \"SELECT * FROM (SELECT * FROM ELO_ratings WHERE epochtime > \" + str(finalthirty) + \" ORDER BY wrestlerid, epochtime DESC) x GROUP BY wrestlerid ORDER BY rating DESC\";\ncur.execute(queryString);\ncon.commit();\nlast30days = cur.fetchall();\n\nqueryString = \"SELECT * FROM (SELECT * FROM ELO_ratings WHERE epochtime < \" + str(finalthirty) + \" ORDER BY wrestlerid, epochtime DESC) x GROUP BY wrestlerid ORDER BY rating DESC\";\ncur.execute(queryString);\ncon.commit();\nthirtydaysago = cur.fetchall();\n\nqueryString = \"SELECT * FROM (SELECT * FROM ELO_ratings WHERE epochtime > \" + str(finalninety) + \" ORDER BY wrestlerid, epochtime DESC) x GROUP BY wrestlerid ORDER BY rating DESC\";\ncur.execute(queryString);\ncon.commit();\nlast90days = 
cur.fetchall();\n\ncur.close();\ncon.close();\n\ntoptenthirtydays = list();\ntoptenninetydays = list();\ntoptenalltime = list();\nbottomtenninetydays = collections.deque();\nbottomtenalltime = collections.deque();\ndatabaseEntries = list();\nnewDatabaseEntries = list();\nallEntries = list();\nallDatabase = list();\n\nfor rating in currentRatings:\n allEntries.append(rating);\n if (len(toptenalltime) < 10):\n toptenalltime.append(rating);\n if (rating[2] > finalthirty and len(toptenthirtydays) < 10):\n toptenthirtydays.append(rating);\n if (rating[2] > finalninety):\n if (len(toptenninetydays) < 10):\n toptenninetydays.append(rating);\n bottomtenninetydays.append(rating);\n if (len(bottomtenninetydays) == 11):\n bottomtenninetydays.popleft();\n bottomtenalltime.append(rating);\n if (len(bottomtenalltime) == 11):\n bottomtenalltime.popleft();\n\nlast30dayschange = list();\n\nfor rating in last30days:\n oldRatingList = [x for x in thirtydaysago if rating[1] == x[1]];\n oldRating = 1500;\n if (len(oldRatingList) != 0):\n oldRating = oldRatingList[0][3];\n name = printWrestle(rating);\n #print(\"30 days ago: \" + str(oldRating));\n change = round(rating[3] - oldRating);\n #print(\"Change: \" + str(change));\n entry = [rating[1], rating[2], change, name];\n #print(entry);\n last30dayschange.append(entry);\nlast30dayschange.sort(key=lambda tup: tup[2], reverse=True);\n\nprint();\nprint(\"Active in last 90 days\");\nfor rating in last90days:\n name = printWrestle(rating);\n\nbiggestgainers = last30dayschange[0:10];\nbiggestlosers = list(reversed(last30dayschange[-10:]));\n\nprint();\nprint (\"Biggest Gainers\");\nindex = 1;\nfor rating in biggestgainers:\n print(rating);\n entry = [\"toptenbiggestgainer\", index, rating[0], rating[1], rating[2], rating[3]];\n databaseEntries.append(entry);\n index += 1;\nprint();\nprint (\"Biggest Losers\");\nindex = 1;\nfor rating in biggestlosers:\n print(rating);\n entry = [\"toptenbiggestlosers\", index, rating[0], rating[1], 
rating[2], rating[3]];\n databaseEntries.append(entry);\n index += 1;\n \nbottomtenalltime.reverse();\nbottomtenninetydays.reverse();\nprint(\"\");\nprint(\"Top ten last thirty days\");\nindex = 1;\nfor rating in toptenthirtydays:\n name = printWrestle(rating);\n entry = [\"toptenthirtydays\", index, rating[1], rating[2], rating[3], name];\n databaseEntries.append(entry);\n index += 1;\nindex = 1;\nprint(\"\");\nprint(\"Top ten last thirty days query\");\nfor rating in last30days:\n name = printWrestle(rating);\n index += 1;\n if (11 == index):\n break;\nindex = 1;\nprint(\"\");\nprint(\"Top ten last ninety days\");\nfor rating in toptenninetydays:\n name = printWrestle(rating);\n entry = [\"toptenninetydays\", index, rating[1], rating[2], rating[3], name];\n databaseEntries.append(entry);\n index += 1;\nindex = 1;\nprint(\"\");\nprint(\"Top ten all time\");\nfor rating in toptenalltime:\n name = printWrestle(rating);\n entry = [\"toptenalltime\", index, rating[1], rating[2], rating[3], name];\n databaseEntries.append(entry);\n index += 1;\nindex = 1;\nprint(\"\");\nprint(\"Bottom ten all time\");\nfor rating in bottomtenalltime:\n name = printWrestle(rating);\n entry = [\"bottomtenalltime\", index, rating[1], rating[2], rating[3], name];\n databaseEntries.append(entry);\n index += 1;\nindex = 1;\nprint(\"\");\nprint(\"Bottom ten last ninety days\");\nfor rating in bottomtenninetydays:\n name = printWrestle(rating);\n entry = [\"bottomtenninetydays\", index, rating[1], rating[2], rating[3], name];\n databaseEntries.append(entry);\n index += 1;\n \ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"]);\ncur = con.cursor(); \nfor rating in allEntries:\n index += 1;\n updateRankingDb(cur, rating[1], rating[2], rating[3]);\nfor entry in databaseEntries:\n index += 1;\n updateTopDb(cur, entry[0], entry[1], entry[2], entry[3], entry[4], entry[5]);\nfor entry in newDatabaseEntries:\n 
insertTopDb(cur, entry[0], entry[1], entry[2], entry[3], entry[4], entry[5]);\ncon.commit();\ncur.close();\ncon.close();\n" }, { "alpha_fraction": 0.44887882471084595, "alphanum_fraction": 0.46677637100219727, "avg_line_length": 25.27567481994629, "blob_id": "891dbf24472caa462c31d98b9f84f895a90e82e3", "content_id": "ed2f7d88788990967dc43c4d94804d19d27d25f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4861, "license_type": "no_license", "max_line_length": 83, "num_lines": 185, "path": "/js/winloss.js", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": " (function () {\n var margins = {\n top: 12,\n left: 48,\n right: 24,\n bottom: 24\n},\nlegendPanel = {\n width: 180\n};\nvar width = 750 - margins.left - margins.right - legendPanel.width;\nvar height = 150 - margins.top - margins.bottom;\n\nvar wrestlerID = get('ID');\n\n// Define 'div' for tooltips\nvar div = d3.select(\"#barContainer\")\n\t.append(\"div\") // declare the tooltip div \n\t.attr(\"class\", \"tooltip\") // apply the 'tooltip' class\n\t.style(\"opacity\", 0); \n \nd3.json(\"winloss.php?ID=\" + wrestlerID, function(error, data) {\n if (error) throw error;\n \n function getTooltip(d) {\n var tooltip = d.x + \" \";\n if (d.y == \"W\") {\n tooltip += \"wins by \";\n }\n else if (d.y == \"L\") {\n tooltip += \"losses by \";\n }\n else\n {\n tooltip += \"draws\";\n return tooltip;\n }\n \n if (d.series == \"pin\") {\n tooltip += \"pinfall\";\n }\n else if (d.series == \"sub\") {\n tooltip += \"submission\";\n }\n else if (d.series == \"KO\") {\n tooltip += \"knock out\";\n }\n else if (d.series == \"DQ\"){\n tooltip += \"disqualification\";\n }\n \n return tooltip;\n }\n \n var dataset = data;\n var series = dataset.map(function (d) {\n return d.name;\n });\n \n dataset = dataset.map(function (d, i) {\n return d.data.map(function (o, i2) {\n // Structure it so that your numeric\n // axis (the stacked amount) is y\n 
return {\n y: parseInt(o.count),\n x: o.result,\n series: series[i]\n };\n });\n }),\n stack = d3.layout.stack();\n \n stack(dataset);\n \n var dataset = dataset.map(function (group) {\n return group.map(function (d) {\n // Invert the x and y values, and y0 becomes x0\n return {\n x: d.y,\n y: d.x,\n x0: d.y0,\n series: d.series\n };\n });\n }),\n svg2 = d3.select('#barContainer')\n .append('svg')\n .attr('width', width + margins.left + margins.right + legendPanel.width)\n .attr('height', height + margins.top + margins.bottom)\n .append('g')\n .attr('transform', 'translate(' + margins.left + ',' + margins.top + ')')\n .on(\"mouseout\", function(d) {\n div.transition()\n\t\t\t\t.duration(200)\t\n\t\t\t\t.style(\"opacity\", 0);\n });\n \n \n var xMax = d3.max(dataset, function (group) {\n return d3.max(group, function (d) {\n return d.x + d.x0;\n });\n }),\n xScale = d3.scale.linear()\n .domain([0, xMax])\n .range([0, width]),\n months = dataset[0].map(function (d) {\n return d.y;\n }),\n _ = console.log(months),\n yScale = d3.scale.ordinal()\n .domain(months)\n .rangeRoundBands([0, height], .1),\n xAxis = d3.svg.axis()\n .scale(xScale)\n .orient('bottom'),\n yAxis = d3.svg.axis()\n .scale(yScale)\n .orient('left'),\n colours = d3.scale.category10(),\n groups = svg2.selectAll('g')\n .data(dataset)\n .enter()\n .append('g')\n .style('fill', function (d, i) {\n return colours(i);});\n \n var rects = groups.selectAll('rect')\n .data(function (d) {\n return d;\n })\n .enter()\n .append('rect')\n .attr('x', function (d) {\n return xScale(d.x0);\n })\n .attr('y', function (d, i) {\n return yScale(d.y);\n })\n .attr('height', function (d) {\n return yScale.rangeBand();\n })\n .attr('width', function (d) {\n return xScale(d.x);\n }).on(\"mouseover\", function(d) {\t\t\n div.transition()\n\t\t\t\t.duration(500)\t\n\t\t\t\t.style(\"opacity\", 0);\n\t\t\tdiv.transition()\n\t\t\t\t.duration(200)\t\n\t\t\t\t.style(\"opacity\", .9);\t\n\t\t\tdiv\t.html(getTooltip(d))\n 
.style(\"left\", (d3.event.pageX) + \"px\")\t\t\t \n\t\t\t\t.style(\"top\", (d3.event.pageY - 28) + \"px\");});\n \n svg2.append('g')\n .attr('class', 'axis')\n .attr('transform', 'translate(0,' + height + ')')\n .call(xAxis);\n \n svg2.append('g')\n .attr('class', 'axis')\n .call(yAxis);\n \n svg2.append('rect')\n .attr('fill', 'yellow')\n .attr('width', 160)\n .attr('height', 30 * dataset.length)\n .attr('x', width + margins.left)\n .attr('y', 0);\n \n series.forEach(function (s, i) {\n svg2.append('text')\n .attr('fill', 'black')\n .attr('x', width + margins.left + 8)\n .attr('y', i * 24 + 24)\n .text(s);\n svg2.append('rect')\n .attr('fill', colours(i))\n .attr('width', 60)\n .attr('height', 20)\n .attr('x', width + margins.left + 90)\n .attr('y', i * 24 + 6);\n});\n});})();" }, { "alpha_fraction": 0.6520127058029175, "alphanum_fraction": 0.6848517060279846, "avg_line_length": 34.622642517089844, "blob_id": "26ae95ac8f20f38b7dc6d34baf4c361e645905e6", "content_id": "7fa49b5f444344e4b6e8c9ea870b0cc9883c1afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 157, "num_lines": 53, "path": "/r/elohistogram.R", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "png(\"elograph.png\", height=400, width=800);\n\n# Load the winner csv file\nmydata <- read.csv(\"c:\\\\python34\\\\winner_scores_2.csv\");\n\n# Pick the first column in vector form for input into histogram\nmydata1 <- mydata[,1];\n\n# Create the histogram\nhistdata1 <- hist(mydata1, xlim=c(0,1), breaks=c(seq(0,1,0.05)));\n\nprint(\"average\");\nmean(mydata1);\n\nhistvalues1 <- histdata1$breaks;\nhistcounts1 <- histdata1$counts;\n\nheaders <- vector(mode=\"numeric\", length=0);\nvalues <- vector(mode=\"numeric\", length=0);\ndiffs <- vector(mode=\"numeric\", length=0);\ntotals <- vector(mode=\"numeric\", length=0);\n\nfor (i in 1:(length(histcounts1)/2))\n{\n numRight <- 
histcounts1[length(histcounts1) + 1 - i];\n header = (histvalues1[length(histvalues1) - i] + histvalues1[length(histvalues1) - i + 1]) / 2;\n total <- histcounts1[i] + numRight;\n percentCorrect <- numRight / total;\n headers <- append(headers, header);\n values <- append(values, percentCorrect);\n diffs <- append(diffs, (percentCorrect - header) * total);\n totals <- append(totals, total);\n}\n\ndatatable = rbind(headers, values, totals);\ndatatable;\n#layout(rbind(c(1),c(2)), heights=c(1,1));\n#layout(matrix(c(1,1), 2, 1, byrow=TRUE));\npar(mfrow = c(1, 2))\ndf.bar <- barplot(values, names.arg=headers, ylim=c(0.4,1), xpd=FALSE, col=c(\"darkblue\"), main=\"Actual Win Percentage per Bucket vs. Expected\", beside=TRUE);\n#legend(\"topright\", legend=c(\"Old\",\"New\"), fill=c(\"darkred\",\"darkgreen\"));\nlines(x = df.bar, y = headers);\npoints(x = df.bar, y = headers, col=\"black\", bg=\"yellow\", pch=21);\nbarplot(totals, names.arg=headers, ylim=c(0, 5000), col=c(\"darkblue\"), main=\"Number of Matches per Bucket\", beside=TRUE);\n#legend(\"topleft\", legend=c(\"Old\",\"New\"), fill=c(\"darkred\",\"darkgreen\"));\n\ntotalMatches = length(mydata)\ntotalMatches\nsum(diffs) / totalMatches\n\n# output histogram to disk and open it up\ngarbage <- dev.off();\nbrowseURL(\"elograph.png\");\n" }, { "alpha_fraction": 0.7843137383460999, "alphanum_fraction": 0.7843137383460999, "avg_line_length": 29.600000381469727, "blob_id": "da496d8423aac1538b1ee81f3df4b088bb171e9f", "content_id": "6bb0e3ed4026c79f25de4eb2101f292abbd752d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 76, "num_lines": 5, "path": "/README.md", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "# Kayfabermetrics\n\nThis repo holds the backing scripts, php, and html files for an old webpage.\n\nThere is a nifty Elo implementation in the Python code.\n" }, { 
"alpha_fraction": 0.5862506628036499, "alphanum_fraction": 0.598934531211853, "avg_line_length": 34.51801681518555, "blob_id": "92e0405b14d305f60bda13b2a1f4f2301b965546", "content_id": "4972b4959f1dd8b099d2b722b9b843cd232b21a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7884, "license_type": "no_license", "max_line_length": 277, "num_lines": 222, "path": "/python/ELO.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "import pymysql as mdb\nimport math\nimport statistics\nimport datetime\nimport time\nimport csv\nfrom decimal import *\nimport json\n\ndef getWrestlerId(name):\n for wrestler in wrestlers:\n names = wrestler[1].split('; ');\n for wrestlername in names: \n if name == wrestlername:\n return wrestler[0];\n input(\"Wrestler not found: \" + name);\n return 0;\n\ndef uploadRating(cur, matchid, wrestlerid, epochtime, rating, result, subresult):\n queryString = \"INSERT INTO ELO_ratings VALUES (\" + str(matchid) + \",\" + str(wrestlerid) + \",\" + str(epochtime) + \",\" + str(rating) + \",'\" + result + \"','\" + subresult + \"');\";\n cur.execute(queryString);\n print(queryString);\n\nconfigDataFile=open('config.json')\nconfigData = json.load(configDataFile)\nconfigDataFile.close()\n\nprint (\"Connecting\")\ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\nprint(\"Connected\");\ncur = con.cursor();\n\nqueryString = \"SELECT ID, GROUP_CONCAT(Name ORDER BY IsPrimary DESC SEPARATOR '; ') FROM wrestlers_temp GROUP BY ID\";\n#queryString = \"SELECT DISTINCT match_2 FROM wwematches2\";\n\n#print(queryString)\ncur.execute(queryString)\ncon.commit()\nwrestlers = cur.fetchall();\n\n#for wrestler in wrestlers:\n# print(wrestler);\n\nqueryString = \"SELECT _num, match_link AS Winner, match_2 AS Result, match_3_link AS Loser, titles AS Title, show_name, ppv, epochtime, match_type FROM 
wwematches2 WHERE show_name LIKE '%Monday Night Raw%' OR show_name LIKE '%SmackDown%' OR ppv = 'yes' ORDER BY epochtime ASC\";\n#queryString = \"SELECT DISTINCT titles FROM wwematches2\";\ncur.execute(queryString);\ncon.commit();\nmatches = cur.fetchall();\n\nqueryString = \"SELECT * FROM ELO_ratings\";\ncur.execute(queryString);\ncon.commit();\nexistingELO = cur.fetchall();\n\ncur.close();\ncon.close();\n\n#for match in matches:\n# print(match);\n#quit();\n\n# Smackdown, Raw, PPV, WrestleMania\nKValues = [15, 40, 50, 160];\nprint(\"K Values: \" + str(KValues));\ncurrentShowName = \"\";\ncurrentShowTime = datetime.MINYEAR;\ninclude = True;\ncurrentKValue = 0;\n\nratings = dict();\ndatabaseEntries = list();\nnewEntries = list();\n\nfor match in matches:\n if 'WWE Live' in match[5]:\n continue;\n if 'dark' == match[8].lower():\n continue;\n if (currentShowName != match[5] and currentShowTime != time.localtime(match[7])):\n #print(\"Show Name: \" + match[5]);\n #print(\"Date: \" + time.strftime('%Y-%m-%d', time.localtime(match[7])));\n keypress = 'y';\n #if ('Live' in match[5]):\n #keypress = input(\"~~~~~~~~~~Include?\");\n if ('y' == keypress):\n include = True;\n else:\n include = False;\n currentShowName = match[5];\n currentShowTime = time.localtime(match[7]);\n if ('WrestleMania' in match[5]):\n currentKValue = KValues[3];\n elif ('yes' == match[6]):\n currentKValue = KValues[2];\n elif ('Raw' in match[5]):\n currentKValue = KValues[1];\n else:\n currentKValue = KValues[0];\n #print('K Value: ' + str(currentKValue));\n if (not include):\n continue;\n matchid = match[0];\n #print(\"Match ID: \" + str(matchid));\n winnerScore = 1;\n matchResult = \"W\";\n loserResult = \"L\";\n subResult = \"pin\";\n if \"draw\" in match[2]:\n winnerScore = 0.5;\n matchResult = \"D\";\n loserResult = \"D\";\n subResult = \"draw\";\n elif \"sub\" in match[2]:\n winnerScore = 1.2;\n subResult = \"sub\";\n elif \"KO\" in match[2]:\n winnerScore = 1.2;\n subResult = \"KO\";\n elif 
\"DQ\" in match[2]:\n winnerScore = 0.8;\n subResult = \"DQ\";\n #print(\" Result weight: \" + str(winnerScore));\n winners = match[1].replace('\"', '').split('; ');\n losers = match[3].replace('\"', '').split('; ');\n winnerRating = 0;\n loserRating = 0;\n for winner in winners:\n if winner == \"\":\n print(match);\n #input(\"Blank Wreslter\");\n id = getWrestlerId(winner);\n if (id not in ratings.keys()):\n ratings[id] = 1500;\n winnerRating += ratings[id];\n for loser in losers:\n if loser == \"\":\n print(match);\n #input(\"Blank Wreslter\");\n id = getWrestlerId(loser);\n if (id not in ratings.keys()):\n ratings[id] = 1500;\n loserRating += ratings[id];\n winnerRating = winnerRating / len(winners);\n loserRating = loserRating / len(losers);\n winnerAdv = (loserRating - winnerRating) / 400.0;\n expectedWinnerScore = 1 / (1 + 10 ** winnerAdv);\n pointChange = round(currentKValue * (winnerScore - expectedWinnerScore));\n newWinnerRating = winnerRating + pointChange;\n newLoserRating = loserRating - pointChange;\n \n #print(\" Winning Team: \" + match[1] + \", Average Rating: \" + str(winnerRating) + \", Expected Score: \" + str(expectedWinnerScore) + \", New Rating: \" + str(newWinnerRating));\n #print(\" Losing Team: \" + match[3] + \", Average Rating: \" + str(loserRating) + \", New Rating: \" + str(newLoserRating));\n \n for winner in winners:\n id = getWrestlerId(winner);\n #print(\" Wrestler: \" + winner + \" ID: \" + str(id));\n #print(\" Old Rating: \" + str(ratings[id]));\n ratings[id] = ratings[id] + pointChange / len(winners);\n #print(\" New Rating: \" + str(ratings[id]));\n entry = [matchid, id, match[7], ratings[id], matchResult, subResult];\n #print(\" Database entry: \" + str(entry));\n databaseEntries.append(entry);\n for loser in losers:\n id = getWrestlerId(loser);\n #print(\" Wrestler: \" + loser + \" ID: \" + str(id));\n #print(\" Old Rating: \" + str(ratings[id]));\n ratings[id] = ratings[id] - pointChange / len(losers);\n #print(\" New 
Rating: \" + str(ratings[id]));\n entry = [matchid, id, match[7], ratings[id], loserResult, subResult];\n #print(\" Database entry: \" + str(entry));\n databaseEntries.append(entry);\n \nprint (statistics.mean(ratings.values()));\nfor w in sorted(ratings, key=ratings.get, reverse=True):\n for wrestler in wrestlers:\n if wrestler[0] == w:\n print(wrestler[1], end=\" \");\n print(str(w) + \": \" + str(round(ratings[w])));\n#print(wrestlers); \n#print(ratings);\n\nfor dbEntry in databaseEntries:\n existingEntry = [x for x in existingELO if x[0]==dbEntry[0] and x[1]==dbEntry[1]];\n if (len(existingEntry) > 1):\n print(existingEntry);\n #input();\n if (len(existingEntry) == 0):\n newEntries.append(dbEntry);\n print(\"New entry:\");\n print(\"Match id:\" + str(dbEntry[0]));\n print(\"Wrestler id:\" + str(dbEntry[1]));\n print(\"Date: \" + time.strftime('%Y-%m-%d', time.localtime(dbEntry[2])));\n #else:\n #if (round(existingEntry[0][3], 4) != round(Decimal(dbEntry[3]), 4)):\n #print(\"Changed Value:\");\n #print(existingEntry);\n #print(dbEntry);\n #print(round(existingEntry[0][3], 4));\n #print(round(Decimal(dbEntry[3]), 4));\n #input();\n\nwith open('ELO_database.csv', 'w', newline='') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['matchid', 'wrestlerid', 'epochtime', 'rating', 'result', 'subresult']);\n a.writerows(databaseEntries);\n\nprint(len(databaseEntries));\nprint(len(existingELO));\nprint(len(newEntries));\n\nprint (\"Connecting\")\ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\nprint(\"Connected\");\ncur = con.cursor();\n\nif (len(databaseEntries) - len(existingELO) == len(newEntries)):\n print (\"No changed entries, uploading to database\");\n for newEnt in newEntries:\n uploadRating(cur, newEnt[0], newEnt[1], newEnt[2], newEnt[3], newEnt[4], newEnt[5]);\n\ncur.close();\ncon.close();" }, { "alpha_fraction": 0.5892555713653564, "alphanum_fraction": 
0.6032233238220215, "avg_line_length": 33.109947204589844, "blob_id": "8346cef5982032ef369a5057d26524b8f9b8f4dc", "content_id": "8eb438d2e0d7bcc7a8f1a0f22a1f13a3b2695c16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6515, "license_type": "no_license", "max_line_length": 277, "num_lines": 191, "path": "/python/ELOCheck.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "import pymysql as mdb\nimport math\nimport statistics\nimport datetime\nimport time\nimport csv\nfrom decimal import *\nimport json\n\ndef getWrestlerId(name):\n for wrestler in wrestlers:\n names = wrestler[1].split('; ');\n for wrestlername in names: \n if name == wrestlername:\n return wrestler[0];\n input(\"Wrestler not found: \" + name);\n return 0;\n\nconfigDataFile=open('config.json')\nconfigData = json.load(configDataFile)\nconfigDataFile.close()\n\nprint (\"Connecting\")\ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\nprint(\"Connected\");\ncur = con.cursor();\n\nqueryString = \"SELECT ID, GROUP_CONCAT(Name ORDER BY IsPrimary DESC SEPARATOR '; ') FROM wrestlers_temp GROUP BY ID\";\n#queryString = \"SELECT DISTINCT match_2 FROM wwematches2\";\n\n#print(queryString)\ncur.execute(queryString)\ncon.commit()\nwrestlers = cur.fetchall();\n\n#for wrestler in wrestlers:\n# print(wrestler);\n\nqueryString = \"SELECT _num, match_link AS Winner, match_2 AS Result, match_3_link AS Loser, titles AS Title, show_name, ppv, epochtime, match_type FROM wwematches2 WHERE show_name LIKE '%Monday Night Raw%' OR show_name LIKE '%SmackDown%' OR ppv = 'yes' ORDER BY epochtime ASC\";\n#queryString = \"SELECT DISTINCT titles FROM wwematches2\";\ncur.execute(queryString);\ncon.commit();\nmatches = cur.fetchall();\n\nqueryString = \"SELECT * FROM ELO_ratings\";\ncur.execute(queryString);\ncon.commit();\nexistingELO = 
cur.fetchall();\n\ncur.close();\ncon.close();\n\n#for match in matches:\n# print(match);\n#quit();\n\n# Smackdown, Raw, PPV, WrestleMania\n# V1.0 Values: 10, 20, 40, 80\nKValues = [15, 40, 50, 160];\nprint(\"K Values: \" + str(KValues));\ncurrentShowName = \"\";\ncurrentShowTime = datetime.MINYEAR;\ninclude = True;\ncurrentKValue = 0;\n\nratings = dict();\ndatabaseEntries = list();\nnewEntries = list();\nwinnerScores = list();\n\nwins = 0;\nlosses = 0;\n\nfor match in matches:\n if 'WWE Live' in match[5]:\n continue;\n if 'dark' == match[8].lower():\n continue;\n if (currentShowName != match[5] and currentShowTime != time.localtime(match[7])):\n #print(\"Show Name: \" + match[5]);\n #print(\"Date: \" + time.strftime('%Y-%m-%d', time.localtime(match[7])));\n keypress = 'y';\n #if ('Live' in match[5]):\n #keypress = input(\"~~~~~~~~~~Include?\");\n if ('y' == keypress):\n include = True;\n else:\n include = False;\n currentShowName = match[5];\n currentShowTime = time.localtime(match[7]);\n if ('WrestleMania' in match[5]):\n currentKValue = KValues[3];\n elif ('yes' == match[6]):\n currentKValue = KValues[2];\n elif ('Raw' in match[5]):\n currentKValue = KValues[1];\n else:\n currentKValue = KValues[0];\n #print('K Value: ' + str(currentKValue));\n if (not include):\n continue;\n matchid = match[0];\n #print(\"Match ID: \" + str(matchid));\n winnerScore = 1;\n matchResult = \"W\";\n loserResult = \"L\";\n subResult = \"pin\";\n if \"draw\" in match[2]:\n winnerScore = 0.5;\n matchResult = \"D\";\n loserResult = \"D\";\n subResult = \"draw\";\n elif \"sub\" in match[2]:\n winnerScore = 1.2;\n subResult = \"sub\";\n elif \"KO\" in match[2]:\n winnerScore = 1.2;\n subResult = \"KO\";\n elif \"DQ\" in match[2]:\n winnerScore = 0.8;\n subResult = \"DQ\";\n #print(\" Result weight: \" + str(winnerScore));\n winners = match[1].replace('\"', '').split('; ');\n losers = match[3].replace('\"', '').split('; ');\n winnerRating = 0;\n loserRating = 0;\n for winner in winners:\n 
id = getWrestlerId(winner);\n if (id not in ratings.keys()):\n ratings[id] = 1500;\n winnerRating += ratings[id];\n for loser in losers:\n id = getWrestlerId(loser);\n if (id not in ratings.keys()):\n ratings[id] = 1500;\n loserRating += ratings[id];\n winnerRating = winnerRating / len(winners);\n loserRating = loserRating / len(losers);\n winnerAdv = (loserRating - winnerRating) / 400.0;\n expectedWinnerScore = 1 / (1 + 10 ** winnerAdv);\n scoreEntry = [expectedWinnerScore];\n if (expectedWinnerScore > 0.5):\n wins = wins + 1;\n else:\n losses = losses + 1;\n winnerScores.append(scoreEntry);\n pointChange = round(currentKValue * (winnerScore - expectedWinnerScore));\n newWinnerRating = winnerRating + pointChange;\n newLoserRating = loserRating - pointChange;\n \n #print(\" Winning Team: \" + match[1] + \", Average Rating: \" + str(winnerRating) + \", Expected Score: \" + str(expectedWinnerScore) + \", New Rating: \" + str(newWinnerRating));\n #print(\" Losing Team: \" + match[3] + \", Average Rating: \" + str(loserRating) + \", New Rating: \" + str(newLoserRating));\n \n for winner in winners:\n id = getWrestlerId(winner);\n #print(\" Wrestler: \" + winner + \" ID: \" + str(id));\n #print(\" Old Rating: \" + str(ratings[id]));\n ratings[id] = ratings[id] + pointChange / len(winners);\n #print(\" New Rating: \" + str(ratings[id]));\n entry = [matchid, id, match[7], ratings[id], matchResult, subResult];\n #print(\" Database entry: \" + str(entry));\n databaseEntries.append(entry);\n for loser in losers:\n id = getWrestlerId(loser);\n #print(\" Wrestler: \" + loser + \" ID: \" + str(id));\n #print(\" Old Rating: \" + str(ratings[id]));\n ratings[id] = ratings[id] - pointChange / len(losers);\n #print(\" New Rating: \" + str(ratings[id]));\n entry = [matchid, id, match[7], ratings[id], loserResult, subResult];\n #print(\" Database entry: \" + str(entry));\n databaseEntries.append(entry);\n \nprint(\"Avg. 
Elo: \" + str(statistics.mean(ratings.values())));\n\nprint(\"Correct Elo predictions: \" + str(wins));\nprint(\"Incorrect Elo predictions: \" + str(losses));\n\nwith open('ELO_database.csv', 'w', newline='') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['matchid', 'wrestlerid', 'epochtime', 'rating', 'result', 'subresult']);\n a.writerows(databaseEntries);\n\nwith open('winner_scores.csv', 'w', newline='') as fp:\n a = csv.writer(fp, delimiter=',');\n a.writerows(winnerScores);\n\nfor w in sorted(ratings, key=ratings.get, reverse=True):\n for wrestler in wrestlers:\n if wrestler[0] == w:\n print(wrestler[1], end=\" \");\n print(str(w) + \": \" + str(round(ratings[w])));\n" }, { "alpha_fraction": 0.5256233811378479, "alphanum_fraction": 0.5344590544700623, "avg_line_length": 25.10769271850586, "blob_id": "ec002fef8d89b123229f553acce715572ecb9a2b", "content_id": "83ee47beac6852e38deea8bdb10a054948d14cbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5093, "license_type": "no_license", "max_line_length": 105, "num_lines": 195, "path": "/php/rawmatchesphp.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<?php\n//Sample Database Connection Syntax for PHP and MySQL.\n//Connect To Database\nerror_reporting(E_ALL ^ E_NOTICE);\ndate_default_timezone_set('UTC');\n\n$showname = $_GET['showname'];\nif (null === $showname)\n{\n $showname = 'Raw';\n}\n\n$showfilter = '';\nif ('Raw' == $showname)\n{\n $showfilter = \"show_name LIKE '%Raw%'\";\n}\nelse if ('Smackdown' == $showname)\n{\n $showfilter = \"show_name LIKE '%Smackdown%'\";\n}\nelse if ('RawSmackdown' == $showname)\n{\n $showfilter = \"(show_name LIKE '%Raw%' OR show_name LIKE '%Smackdown%')\";\n}\nelse if ('PPV' == $showname)\n{\n $showfilter = \"ppv = 'yes'\";\n}\nelse if ('PPVRaw' == $showname)\n{\n $showfilter = \"(show_name LIKE '%Raw%' OR ppv = 'yes')\";\n}\nelse if ('PPVSmackdown' == $showname)\n{\n $showfilter 
= \"(show_name LIKE '%Smackdown%' OR ppv = 'yes')\";\n}\nelse if ('All' == $showname)\n{\n $showfilter = \"(show_name LIKE '%Raw%' OR show_name LIKE '%Smackdown%' OR ppv = 'yes')\";\n}\n\n$epochtime = time();\n$oneyear = 365 * 24 * 60 * 60;\n$lastyear = $epochtime - $oneyear;\n\n//echo date(DATE_RFC2822, $lastyear / 1000);\n\n$namereplace = array('Xavier' => 'Xavier Woods',\n 'Adrian Neville' => 'Neville',\n 'AJ' => 'A. J. Lee',\n 'R-Ziggler'=>'R-Truth',\n 'Alexander Rusev'=>'Rusev',\n 'Damien Sandow'=>'Damien Mizdow',\n 'Papi C'=>'Local Jobbers',\n 'Socorro'=>'Local Jobbers',\n 'Bu Ku Dao'=>'Local Jobbers',\n 'Kevin Kross'=>'Local Jobbers',\n 'Mad 1' => 'Local Jobbers',\n \"Lance Anoa'i\" => 'Local Jobbers',\n 'Spartan' => 'Local Jobbers',\n 'Rhett Titus' => 'Local Jobbers',\n 'Titan'=>'Local Jobbers',\n 'Tamina Snuka' => 'Tamina',\n 'Bad News Barrett' => 'Wade Barrett',\n 'King Barrett' => 'Wade Barrett');\n\n$matchcount = array();\n//$datelist = array();\n\ninclude 'sqldata.php';\n\n$usertable=\"wwematches2\";\n$yourfield = \"match_link\";\n\n$link = mysqli_connect($hostname,$username, $password, $dbname);\n\n# Check If Record Exists\n\n$query = \"SELECT * FROM $usertable WHERE $showfilter AND epochtime > $lastyear AND match_type != 'dark'\";\n//echo $query;\n$result = mysqli_query($link, $query);\n\nif($result)\n{\n //echo \"<h3>\" + mysqli_num_rows($result) + \"</h3>\";\n while($row = mysqli_fetch_array($result))\n {\n //if (!in_array($row[\"epochdate\"], $datelist))\n //{\n // array_push($datelist, $row[\"epochdate\"]);\n //}\n $winners = explode(\"; \", $row[\"match_link\"]);\n $losers = explode(\"; \", $row[\"match_3_link\"]);\n foreach($winners as &$winner)\n {\n if (array_key_exists($winner, $namereplace))\n { \n $winner = $namereplace[$winner];\n }\n if (array_key_exists($winner, $matchcount))\n {\n $matchcount[$winner]++;\n }\n else\n {\n $matchcount[$winner] = 1;\n }\n }\n foreach($losers as &$winner)\n {\n if (array_key_exists($winner, 
$namereplace))\n {\n $winner = $namereplace[$winner];\n }\n if (array_key_exists($winner, $matchcount))\n {\n $matchcount[$winner]++;\n }\n else\n {\n $matchcount[$winner] = 1;\n }\n }\n }\n arsort($matchcount);\n $numwrestlers = count($matchcount);\n $wrestlerlist = array_keys($matchcount);\n //echo $wrestlerlist;\n //echo var_dump($matchcount);\n //echo json_encode($matchcount);\n //arsort($datelist);\n //foreach ($datelist as &$dater)\n //{\n // echo \"<p>\".date(DATE_RFC2822, $dater / 1000).\"</p>\";\n //}\n \n $winsmatrix = array_fill(0, $numwrestlers, array_fill(0, $numwrestlers, 0));\n $matchmatrix = array_fill(0, $numwrestlers, array_fill(0, $numwrestlers, 0));\n \n mysqli_data_seek($result, 0);\n while($row = mysqli_fetch_array($result))\n { \n $winners = explode(\"; \", $row[\"match_link\"]);\n $losers = explode(\"; \", $row[\"match_3_link\"]);\n foreach($winners as &$winner)\n {\n if (array_key_exists($winner, $namereplace))\n { \n $winner = $namereplace[$winner];\n }\n foreach($losers as &$loser)\n {\n if (array_key_exists($loser, $namereplace))\n {\n $loser = $namereplace[$loser];\n }\n //echo \"$winner def $loser\";\n $winnerindex = array_search($winner, $wrestlerlist);\n $loserindex = array_search($loser, $wrestlerlist);\n if ( FALSE === $winnerindex || FALSE === $loserindex)\n {\n echo \"Not found\";\n }\n else\n {\n if (strpos($row[\"match_2\"], 'draw') === false)\n { \n $winsmatrix[$winnerindex][$loserindex] += 1;\n }\n $matchmatrix[$winnerindex][$loserindex] += 1;\n $matchmatrix[$loserindex][$winnerindex] += 1;\n }\n }\n }\n }\n \n //echo json_encode($winsmatrix);\n //echo json_encode($matchmatrix);\n $indexedmatchcount = array();\n $index = 0;\n foreach($matchcount as $key => $value)\n {\n $indexedmatchcount[$index] = array ('name' => $key, 'nummatches' => $value);\n $index++;\n }\n \n $jsonobject = array($indexedmatchcount, $winsmatrix, $matchmatrix);\n echo json_encode($jsonobject);\n}\nelse\n{\n //echo \"No result\";\n}\n?>\n \n" }, { 
"alpha_fraction": 0.6582781672477722, "alphanum_fraction": 0.6635761857032776, "avg_line_length": 28.627450942993164, "blob_id": "8c104d5edc742021ddc478020b30dc1a5d7a8460", "content_id": "659a68ee728ca0d0041d10e7e71e1fe77209501e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 244, "num_lines": 51, "path": "/php/sankey.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<?php\nerror_reporting(E_ALL ^ E_NOTICE);\ndate_default_timezone_set('UTC');\n\ninclude 'sqldata.php';\n\n$usertable=\"SankeyData\";\n$fedtable = \"Federations\";\n$elotable=\"ELO_ratings\";\n\n$link = mysqli_connect($hostname,$username, $password, $dbname);\n\n//$query = \"SELECT wrestlers_temp.Name, Federations.Name, Federations.Abbr, SankeyData.FedID, SankeyData.Dates, f2.Name AS DestName, f2.Abbr AS DestAbbr, SankeyData.DestID FROM SankeyData LEFT JOIN Federations ON SankeyData.FedID=Federations.ID\n//LEFT JOIN wrestlers_temp ON SankeyData.WrestlerID=wrestlers_temp.RecordID\n//LEFT JOIN Federations f2 ON SankeyData.DestID=f2.ID\";\n//echo $query;\n$query = \"SELECT FedID, DestID, COUNT(*) AS Count FROM SankeyData GROUP BY FedID, DestID\";\n$sankeyresult = mysqli_query($link, $query);\n\n$query = \"SELECT * FROM Federations\";\n$fedresult = mysqli_query($link, $query);\n\n$sankeyjson = array();\n$fedarray = array();\n$sankeyarray = array();\n\nif ($fedresult)\n{\n $index = 0;\n while($row = mysqli_fetch_array($fedresult))\n {\n $fedarray[$index] = array(\"name\" => $row['Abbr'], \"fullname\" => $row['Name']);\n $index++;\n }\n}\n$sankeyjson[0] = array(\"nodes\" => $fedarray);\n\nif($sankeyresult)\n{\n $index = 0;\n while($row = mysqli_fetch_array($sankeyresult))\n {\n $sankeyarray[$index] = array(\"source\" => (int)$row['FedID'], \"target\" => (int)$row['DestID'], \"value\" => (int)$row['Count']);\n $index++;\n }\n}\n\n$sankeyjson[1] = array(\"links\" => 
$sankeyarray);\n\necho json_encode($sankeyjson);\n?>" }, { "alpha_fraction": 0.6494765281677246, "alphanum_fraction": 0.690965473651886, "avg_line_length": 36.92647171020508, "blob_id": "3e305c7dc29a92f2bb99ae1e8987d7a7dec58e7f", "content_id": "fee5de345464f31818b507c5c3f2bb63f392a1e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2579, "license_type": "no_license", "max_line_length": 184, "num_lines": 68, "path": "/r/elohistogram2.R", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "png(\"elograph.png\", height=400, width=800);\n\n# Load the winner csv file\nmydata <- read.csv(\"c:\\\\python34\\\\winner_scores_2.csv\");\n\n# Pick the first column in vector form for input into histogram\nmydata1 <- mydata[,1];\nmydata2 <- mydata[,2];\n\n# Create the histogram\nhistdata1 <- hist(mydata1, xlim=c(0,1), breaks=c(seq(0,1,0.05)));\nhistdata2 <- hist(mydata2, xlim=c(0,1), breaks=c(seq(0,1,0.05)));\n\nprint(\"average\");\nmean(mydata1);\nmean(mydata2);\n\nhistvalues1 <- histdata1$breaks;\nhistcounts1 <- histdata1$counts;\nhistvalues2 <- histdata2$breaks;\nhistcounts2 <- histdata2$counts;\n\nheaders <- vector(mode=\"numeric\", length=0);\nvalues <- vector(mode=\"numeric\", length=0);\ndiffs <- vector(mode=\"numeric\", length=0);\ntotals <- vector(mode=\"numeric\", length=0);\nvalues2 <- vector(mode=\"numeric\", length=0);\ndiffs2 <- vector(mode=\"numeric\", length=0);\ntotals2 <- vector(mode=\"numeric\", length=0);\n\nfor (i in 1:(length(histcounts1)/2))\n{\n numRight <- histcounts1[length(histcounts1) + 1 - i];\n numRight2 <- histcounts2[length(histcounts2) + 1 - i];\n header = (histvalues1[length(histvalues1) - i] + histvalues1[length(histvalues1) - i + 1]) / 2;\n total <- histcounts1[i] + numRight;\n total2 <- histcounts2[i] + numRight2;\n percentCorrect <- numRight / total;\n percentCorrect2 <- numRight2 / total2;\n headers <- append(headers, header);\n values <- append(values, 
percentCorrect);\n values2 <- append(values2, percentCorrect2);\n diffs <- append(diffs, (percentCorrect - header) * total);\n totals <- append(totals, total);\n diffs2 <- append(diffs2, (percentCorrect2 - header) * total2);\n totals2 <- append(totals2, total2);\n}\n\ndatatable = rbind(headers, values, values2, totals);\ndatatable;\n#layout(rbind(c(1),c(2)), heights=c(1,1));\n#layout(matrix(c(1,1), 2, 1, byrow=TRUE));\npar(mfrow = c(1, 2))\ndf.bar <- barplot(rbind(values, values2), names.arg=headers, ylim=c(0.4,1), xpd=FALSE, col=c(\"darkred\",\"darkgreen\"), main=\"Actual Win Percentage per Bucket vs. Expected\", beside=TRUE);\nlegend(\"topright\", legend=c(\"Old\",\"New\"), fill=c(\"darkred\",\"darkgreen\"));\ncolMeans(df.bar);\nlines(x = colMeans(df.bar), y = headers);\npoints(x = colMeans(df.bar), y = headers, col=\"black\", bg=\"yellow\", pch=21);\nbarplot(rbind(totals, totals2), names.arg=headers, ylim=c(0, 5000), col=c(\"darkred\",\"darkgreen\"), main=\"Number of Matches per Bucket\", beside=TRUE);\nlegend(\"topleft\", legend=c(\"Old\",\"New\"), fill=c(\"darkred\",\"darkgreen\"));\n\ntotalMatches = length(mydata)\ntotalMatches\nsum(diffs) / totalMatches\n\n# output histogram to disk and open it up\ngarbage <- dev.off();\nbrowseURL(\"elograph.png\");\n" }, { "alpha_fraction": 0.6639004349708557, "alphanum_fraction": 0.6639004349708557, "avg_line_length": 25.88888931274414, "blob_id": "7ee316ae48065870548c3e6d79c2f9c489e62980", "content_id": "3c26a4f8c2c49731c1ceee4cea8b9da226f09d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/python/grabrss.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "import feedparser;\nimport importSQL;\n\nfeed = feedparser.parse('http://www.profightdb.com/rss.xml');\n\nfor post in feed.entries:\n if \"WWE\" in post.title:\n print (post.title + \": \" + 
post.link);\n importSQL.importUrl(post.link);" }, { "alpha_fraction": 0.4586946666240692, "alphanum_fraction": 0.465084433555603, "avg_line_length": 24.488372802734375, "blob_id": "496018ba9f9b61693a804a3bfb80d43570b21d6b", "content_id": "9c6da465a70dcadf694bbd4236cfc96bebe002be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2191, "license_type": "no_license", "max_line_length": 170, "num_lines": 86, "path": "/php/winloss.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<?php\nerror_reporting(E_ALL ^ E_NOTICE);\ndate_default_timezone_set('UTC');\n\n// Get ID from page\n$id = 1134;\n$id = $_GET['ID'];\n$name = '';\n$othernames = '';\n$currentELO = 0;\n$lastMatch = '';\n\ninclude 'sqldata.php';\n\n$usertable=\"wrestlers_temp\";\n$elotable=\"ELO_ratings\";\n\n$link = mysqli_connect($hostname,$username, $password, $dbname);\n\n# Check If Record Exists\n\n$names = array();\n$data = array();\n\n$query = \"SELECT * FROM $usertable WHERE ID = $id\";\n//echo $query;\n$result = mysqli_query($link, $query);\n\nif($result)\n{\n while($row = mysqli_fetch_array($result))\n {\n array_push($names, $row[\"Name\"]);\n }\n}\n\n$query = \"SELECT $elotable.result, $elotable.subresult, COUNT(*) AS count FROM $elotable WHERE $elotable.wrestlerid = $id GROUP BY $elotable.result, $elotable.subresult\";\n$result = mysqli_query($link, $query);\nif ($result)\n{\n while($row = mysqli_fetch_array($result))\n {\n $found = false;\n foreach($data as &$datum)\n {\n if ($datum[\"name\"] == $row[1])\n {\n foreach($datum[\"data\"] as &$resulttype)\n {\n if ($resulttype[\"result\"] == $row[0])\n {\n $resulttype[\"count\"] = $row[2];\n }\n unset($resulttype);\n }\n $found = true;\n }\n unset($datum);\n }\n if (!$found)\n {\n $datum = [\"name\" => $row[1], \"data\" => array()];\n $results = [\"result\" => \"W\", \"count\" => 0];\n array_push($datum[\"data\"], $results);\n $results = [\"result\" => \"L\", \"count\" 
=> 0];\n array_push($datum[\"data\"], $results);\n $results = [\"result\" => \"D\", \"count\" => 0];\n array_push($datum[\"data\"], $results);\n foreach($datum[\"data\"] as &$resulttype)\n {\n if ($resulttype[\"result\"] == $row[0])\n {\n $resulttype[\"count\"] = $row[2];\n }\n unset($resulttype);\n }\n $data[] = $datum;\n }\n }\n}\nelse\n{\n echo mysqli_error($link);\n}\necho json_encode($data);\n?>" }, { "alpha_fraction": 0.53722083568573, "alphanum_fraction": 0.5554177165031433, "avg_line_length": 36.20769119262695, "blob_id": "3dff4d1e495c8acdeb8b712b60f788497959a5c8", "content_id": "5d96c775eb81bc209531bf78e9f19397a419b35e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4836, "license_type": "no_license", "max_line_length": 129, "num_lines": 130, "path": "/python/matchup.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "import pymysql as mdb\nimport math\nimport statistics\nimport datetime\nimport time\nimport csv\nfrom decimal import *\nimport json\n\ndef getWrestlerId(name):\n for wrestler in wrestlers:\n names = wrestler[1].split('; ');\n for wrestlername in names: \n if name == wrestlername:\n return wrestler[0];\n #input(\"Wrestler not found: \" + name);\n return 0;\n\ndef getELO(wrestlerId):\n ratings = [x[2] for x in currentELO if x[0] == wrestlerId];\n if (len(ratings) != 0):\n return round(ratings[0]);\n else:\n return 1500;\n\nconfigDataFile=open('config.json')\nconfigData = json.load(configDataFile)\nconfigDataFile.close()\n\nprint (\"Connecting\")\ncon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\nprint(\"Connected\");\ncur = con.cursor();\n\nqueryString = \"SELECT ID, Name FROM wrestlers_temp WHERE IsPrimary=1 GROUP BY ID\";\ncur.execute(queryString)\ncon.commit()\nwrestlers = cur.fetchall();\n\nqueryString = \"SELECT * FROM 
ELO_current\";\ncur.execute(queryString)\ncon.commit()\ncurrentELO = cur.fetchall();\n\nmatches = [[[\"Big E. Langston\", \"Xavier Woods\", \"Kofi Kingston\"],[\"Kalisto\", \"Sin Cara\"],[\"Jimmy Uso\", \"Jey Uso\"]],\n [[\"Charlotte\"], [\"Paige\"]],\n [[\"Jack Swagger\"], [\"Alberto Del Rio\"]],\n [[\"Dean Ambrose\"], [\"Kevin Owens\"]],\n [[\"Bubba Ray Dudley\", \"D-Von Dudley\", \"Tommy Dreamer\", \"Rhyno\"],[\"Bray Wyatt\", \"Luke Harper\", \"Braun Strowman\", \"Erick Rowan\"]],\n [[\"Ryback\"],[\"Rusev\"]],\n [[\"Sheamus\"], [\"Roman Reigns\"]]];\n\nf = open('preview.txt', 'w');\n\nfor match in matches:\n teamELOs = list();\n teamQs = list();\n teamNames = list();\n for team in match:\n name = \"\";\n members = len(team);\n index = 0;\n teamELO = 0;\n for person in team:\n id = getWrestlerId(person);\n elo = getELO(id);\n print (person + \"(\" + str(id) + \"): \" + str(elo));\n teamELO += elo;\n name += person;\n if members == 2:\n if index == 0:\n name += \" and \";\n if members > 2:\n if index < members - 2:\n name += \", \";\n if index == members - 2:\n name += \", and \";\n index += 1;\n #print(\"Team ELO: \" + str(teamELO));\n average = teamELO / len(team);\n Q = math.pow(10.0, (average / 400));\n print(\" Average: \" + str(average));\n #print(\" Q: \" + str(Q));\n teamELOs.append(average);\n teamQs.append(Q);\n teamNames.append(name);\n totalELO = sum(teamELOs);\n totalQ = sum(teamQs);\n #print (\"Total ELO: \" + str(totalELO));\n #print(\"Total Q:\" + str(totalQ));\n index = 0;\n winningIndex = 0;\n winningPercent = 0;\n for eachq in teamQs:\n winchance = eachq / totalQ;\n print(str(match[index]));\n print(\"Chance of winning: \" + str(winchance));\n if winchance > winningPercent:\n winningIndex = index;\n winningPercent = winchance;\n index += 1;\n print(\"Average ELO for match: \" + str(totalELO / len(match)));\n print(\"\");\n \n print(\"<p style=\\\"font-size:32px;font-weight:bold;text-align:center\\\">\", end=\"\", file=f);\n print(' vs. 
'.join(teamNames), end=\"\", file=f);\n print(\"</p>\", file=f);\n print(\"<p style=\\\"font-size:28px;font-weight:bold;text-align:center;margin-top:0\\\">Title</p>\", file=f);\n print(\"<p style=\\\"font-size:24px;padding:10px;margin:0\\\">Elo Ratings:</p>\", file=f);\n print(\"<ul style=\\\"line-height:90%;margin:0;margin-left:40px;padding:0\\\">\", file=f);\n for x in range(0, len(teamNames)):\n print(\"<li>\" + teamNames[x] + \": \" + str(round(teamELOs[x])) + \"</li>\", file=f);\n print(\"<li><b>Average Match Elo: \" + str(round(totalELO / len(match))) + \"</b></li>\", file=f);\n print(\"</ul>\", file=f);\n print(\"<p style=\\\"font-size:24px;padding:10px;margin-top:40px;margin-bottom:0px\\\">Elo Breakdown:</p>\", file=f);\n print(\"<p style=\\\"margin-left:40px;margin-bottom:10px\\\">\", file=f);\n print(\"breakdown\", file=f);\n print(\"<ul style=\\\"line-height:90%;margin:0;margin-left:80px;margin-bottom:10px;padding:0\\\">\", file=f);\n print(\"<li></li>\", file=f);\n print(\"</ul>\", file=f);\n print(\"<strong style=\\\"font-size:22px;margin-top:10px\\\">Elo Prediction: \", end=\"\", file=f);\n print(teamNames[winningIndex] + \" (\" + str(round(winningPercent * 100)) + \"% chance of winning)\", end=\"\", file=f);\n print(\"</strong></p>\", file=f);\n print(\"<p style=\\\"font-size:24px;padding:10px;margin-top:40px;margin-bottom:0px\\\">Author Breakdown:</p>\", file=f);\n print(\"<p style=\\\"margin-left:40px;margin-bottom:10px\\\">\", file=f);\n print(\"<strong style=\\\"font-size:22px;margin-top:10px\\\">Author's Prediction: </strong>\", file=f);\n print(\"</p><hr>\", file=f); \n print(\"\", file=f);\n \nf.close();" }, { "alpha_fraction": 0.6830461025238037, "alphanum_fraction": 0.6867722272872925, "avg_line_length": 31.048507690429688, "blob_id": "c69602d538e67ed7229c2d03a6b53f0afd77b936", "content_id": "3b083c59f7a851c36700d67ff1a96ccb9fe1e053", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
8588, "license_type": "no_license", "max_line_length": 147, "num_lines": 268, "path": "/python/importSQL.py", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport pymysql as mdb\nfrom optparse import OptionParser\nimport json\nimport urllib.request, urllib.error, urllib.parse\nimport urllib.request, urllib.parse, urllib.error\nimport sys\nimport zlib\nimport time\n\n\ndef getRequiredConfigData(configData, configName):\n\tif configData[configName] is None:\n\t\tprint((\"Missing required option: \" + configName))\n\t\tprint(\"This option needs to be in a config file, or supplied on the commandline\")\n\t\tsys.exit(1)\n\ndef getOptionalConfigData(configData, configName, default):\n\tif default is not None and configName not in configData:\n\t\tconfigData[configName] = default\n\ndef getConfigOptions(configData): \n\n\t#print(configData)\n\n\t# Get required config data\n\tgetRequiredConfigData(configData, \"sourceUUID\")\n\tgetRequiredConfigData(configData, \"table\")\n\tgetRequiredConfigData(configData, \"database\")\n\n\tgetRequiredConfigData(configData, \"ioUserID\")\n\tgetRequiredConfigData(configData, \"ioAPIKey\")\n\tgetRequiredConfigData(configData, \"inputUrl\")\n\n\t# Grab optional configuration parameters, use defaults if they don't exist\n\tgetOptionalConfigData(configData, \"host\", \"localhost\")\n\tgetOptionalConfigData(configData, \"port\", 3306)\n\tgetOptionalConfigData(configData, \"username\", None)\n\tgetOptionalConfigData(configData, \"password\", None)\n\tgetOptionalConfigData(configData, \"crawl\", False);\n\n\treturn configData\n\n# Grab the data from a crawler snapshot\ndef grabFromCrawlSnapshot(sourceUUID, ioUserID, ioAPIKey):\n\turlAuthParams = urllib.parse.urlencode({\"_user\": ioUserID, \"_apikey\": ioAPIKey})\n\n\tconnectorUrl = 'https://api.import.io/store/data/' + sourceUUID + \"?\" + urlAuthParams\n\tconnectorResponse = 
json.loads(urllib.request.urlopen(connectorUrl).read())\n\tsnapshotGuid = connectorResponse[\"snapshot\"]\n\n\t#Have to use gzip encoding for this\n\trequest = urllib.request.Request('https://api.import.io/store/data/' + sourceUUID + \"/_attachment/snapshot/\" + snapshotGuid + \"?\" + urlAuthParams)\n\trequest.add_header('Accept-encoding', 'gzip')\n\tresponse = urllib.request.urlopen(request)\n\tsnapshotResponse = json.loads(zlib.decompress(response.read(), 16+zlib.MAX_WBITS))\n\n\tcrawledPages = snapshotResponse[\"tiles\"][0][\"results\"][0][\"pages\"]\n\n\tresults = []\n\n\tfor page in crawledPages:\n\t\tresults.extend(page[\"results\"])\n\n\treturn results\n\t\n\n# Grab the data from import.io\ndef importRESTQuery(sourceUUID, inputUrl, ioUserID, ioAPIKey):\n\turlParams = urllib.parse.urlencode({\"input/webpage/url\": inputUrl, \"_user\": ioUserID, \"_apikey\": ioAPIKey})\n\turl = 'https://api.import.io/store/data/' + sourceUUID + '/_query?' + urlParams\n\n\tresponse = urllib.request.urlopen(url).read().decode('utf-8')\n\tjsonresponse = json.loads(response);\n\t#print (jsonresponse);\n\treturn jsonresponse[\"results\"];\n\n# Convert the data to a reasonable format and stick it in SQL\ndef pushToSQL(configData, results):\n\n\tfieldMappings = None\n\n\tif \"mapping\" in configData:\n\t\tfieldMappings = configData[\"mapping\"]\n\t\tsqlFieldMapping = [];\n\n\t\tfor mapping in fieldMappings:\n\t\t\tsqlFieldMapping.append(fieldMappings[mapping])\n\n\t\tsqlFieldMappingString = \", \".join(sqlFieldMapping)\n\t\t#print (sqlFieldMappingString)\n\n\t\t#print((\"Mappings: %s\" % fieldMappings))\n\n\n\tcon = None\n\ttry:\n\t\teventno = configData[\"inputUrl\"].split('-');\n\t\teventno = eventno[-1].split('.');\n\t\teventno = eventno[0];\n\t\t\n\t\tprint(\"Parsing event number \" + eventno);\n\t\t\t\t\t\t\n\t\t#print (\"Connecting\")\n\t\tcon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], 
db=configData[\"database\"])\n\t\t#print(\"Connected\");\n\t\tcur = con.cursor()\n\t\t\n\t\tfor result in results:\n\t\t\tvalues = []\n\t\t\tif(fieldMappings is not None):\n\t\t\t\t# Get the values for each row based on the mapping that we supplied in config.json\n\t\t\t\tfor mapping in fieldMappings:\n\t\t\t\t\tif result[mapping] is not None:\n\t\t\t\t\t\tvalues.append(\"'\"+result[mapping]+\"'\")\n\t\t\telse:\n\t\t\t\t# Get the values from the import.io source (assume the field names are identical)\n\t\t\t\tsqlFieldMapping = [];\n\t\t\t\tfor key in result:\n\t\t\t\t\tif (type(result[key]) is str):\n\t\t\t\t\t\tresult[key] = result[key].replace('\"','')\n\t\t\t\t\tif (key == \"no_number\"):\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\tif (key == \"no_number/_source\"):\n\t\t\t\t\t\tsqlFieldMapping.append(\"no_number\")\n\t\t\t\t\t\tvalues.append(\"\\\"\"+result[key]+\"\\\"\")\n\t\t\t\t\telif (key == \"dateurl\"):\n\t\t\t\t\t\tsqlFieldMapping.append(\"dateurl\")\n\t\t\t\t\t\tsqlFieldMapping.append(\"datestring\");\n\t\t\t\t\t\tsqlFieldMapping.append(\"epochtime\")\n\t\t\t\t\t\tdateurl = result[key];\n\t\t\t\t\t\tdate = dateurl.split('/');\n\t\t\t\t\t\tdate = date[-1].split('.');\n\t\t\t\t\t\tdatenumber = time.strptime(date[0], \"%m-%d-%Y\")\n\t\t\t\t\t\tepochtime = time.mktime(datenumber);\n\t\t\t\t\t\tvalues.append(\"\\\"\"+ dateurl +\"\\\"\")\n\t\t\t\t\t\tvalues.append(\"\\\"\"+ date[0] +\"\\\"\")\n\t\t\t\t\t\tvalues.append(\"\\\"\"+ str(epochtime) +\"\\\"\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tsqlFieldMapping.append(key.replace(\"/_\",\"_\"))\n\t\t\t\t\t\tif (type(result[key]) is list):\n\t\t\t\t\t\t\tvalues.append(\"\\\"\" + \"; \".join(result[key]).replace('\"','') + \"\\\"\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvalues.append(\"\\\"\"+result[key]+\"\\\"\")\n\t\t\t\tif \"titles\" not in sqlFieldMapping:\n\t\t\t\t\tsqlFieldMapping.append(\"titles\");\n\t\t\t\t\tvalues.append(\"\\\"\\\"\")\n\t\t\t\tif \"match_type\" not in 
sqlFieldMapping:\n\t\t\t\t\tsqlFieldMapping.append(\"match_type\");\n\t\t\t\t\tvalues.append(\"\\\"\\\"\")\n\t\t\t\tif \"duration\" not in sqlFieldMapping:\n\t\t\t\t\tsqlFieldMapping.append(\"duration\");\n\t\t\t\t\tvalues.append(\"\\\"\\\"\")\n\t\t\t\tif \"_pageUrl\" not in sqlFieldMapping:\n\t\t\t\t\tsqlFieldMapping.append(\"_pageUrl\");\n\t\t\t\t\tsqlFieldMapping.append(\"event_no\");\n\t\t\t\t\tvalues.append(\"\\\"\"+ configData[\"inputUrl\"] +\"\\\"\")\n\t\t\t\t\tvalues.append(\"\\\"\" + eventno + \"\\\"\")\n\t\t\t\tsqlFieldMappingString = \", \".join(sqlFieldMapping)\n\n\t\t\tsqlFieldValuesString = \", \".join(values)\n\t\t\t#print((\"row data: %s\" % sqlFieldValuesString))\n\t\t\twrestler1 = result[\"match_link\"];\n\t\t\twrestler2 = result[\"match_3_link\"];\n\t\t\tif (type(wrestler1) is list):\n\t\t\t\twrestler1 = \"; \".join(wrestler1);\n\t\t\tif (type(wrestler2) is list):\n\t\t\t\twrestler2 = \"; \".join(wrestler2);\n\t\t\t\n\t\t\tprint(wrestler1 + \" \" + result[\"match_2\"] + \" \" + wrestler2);\n\n\t\t\tqueryString = \"INSERT INTO \" + configData[\"table\"] + \" (\" + sqlFieldMappingString + \") VALUES(\"+sqlFieldValuesString+\");\"\n\n\t\t\t#print(queryString)\n\t\t\tcur.execute(queryString)\n\t\t\tcon.commit()\n\n\t\tcur.close()\n\n\t\t#print((\"%s\" % (configData[\"host\"])))\n\n\texcept (RuntimeError, TypeError, NameError, mdb.Error) as e:\n\t\tprint(e);\n\tfinally:\n\t\tif con:\n\t\t\tcon.rollback()\n\t\t\tcon.close()\n\t\t#sys.exit(1)\n\t\t\ndef checkIfExists(configData):\n\teventno = configData[\"inputUrl\"].split('-');\n\teventno = eventno[-1].split('.');\n\teventno = eventno[0];\n\tcon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\n\tcur = con.cursor()\n\tqueryString = \"SELECT * FROM \" + configData[\"table\"] + \" WHERE event_no=\" + eventno;\n\t#print(queryString)\n\tcur.execute(queryString)\n\tcon.commit()\n\t\n\trows = 
cur.fetchall();\n\tcur.close();\n\tcon.close();\n\t\n\tif (0 != len(rows)):\n\t\tprint(\"Event already uploaded!\");\n\t\treturn False;\n\telse:\n\t\tprint (\"New event!\");\n\t\treturn True;\n\ndef checkIfEventExists(configData, table, eventno):\n\tcon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\n\tcur = con.cursor()\n\tqueryString = \"SELECT * FROM \" + table + \" WHERE event_no=\" + eventno;\n\t#print(queryString)\n\tcur.execute(queryString)\n\tcon.commit()\n\t\n\trows = cur.fetchall();\n\tcur.close();\n\tcon.close();\n\t\n\tif (0 != len(rows)):\n\t\tprint(\"Event already uploaded!\");\n\t\treturn False;\n\telse:\n\t\tprint (\"New event!\");\n\t\treturn True;\n\t\ndef getEventNumbers(configData, table):\n\tcon = mdb.connect(host=configData[\"host\"], user=configData[\"username\"], passwd=configData[\"password\"], db=configData[\"database\"])\n\tcur = con.cursor()\n\tqueryString = \"SELECT DISTINCT event_no FROM \" + table;\n\t#print(queryString)\n\tcur.execute(queryString)\n\tcon.commit()\n\t\n\trows = cur.fetchall();\n\tcur.close();\n\tcon.close();\n\t\n\treturn rows;\n\ndef doImport(configData):\n\tif (checkIfExists(configData) == False):\n\t\treturn;\n\n\tif configData[\"crawl\"] == True:\n\t\tresults = grabFromCrawlSnapshot(configData[\"sourceUUID\"], configData[\"ioUserID\"], configData[\"ioAPIKey\"])\n\telse:\n\t\tresults = importRESTQuery(configData[\"sourceUUID\"], configData[\"inputUrl\"], configData[\"ioUserID\"], configData[\"ioAPIKey\"]);\n\tprint((\"Recieved %d rows of data\" % (len(results))))\n\tpushToSQL(configData, results)\n\ndef importUrl(url):\n\ttry:\n\t\tconfigDataFile=open('config.json')\n\t\tconfigData = json.load(configDataFile)\n\t\tconfigDataFile.close()\n\t\t#print (\"CONFIG FOUND, YAY!\")\n\texcept IOError:\n\t\tprint(('NO CONFIG FILE FOUND, going to use defaults', 'config.json'))\n\t\n\tconfigData = 
getConfigOptions(configData)\n\tconfigData[\"inputUrl\"] = url;\n\tdoImport(configData)" }, { "alpha_fraction": 0.605241596698761, "alphanum_fraction": 0.6330876350402832, "avg_line_length": 17.78461456298828, "blob_id": "f616dfe41fba34774bc6a50e0642351ca016feb9", "content_id": "3eefb47d505efacb27679565b706a6deb430e660", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 89, "num_lines": 65, "path": "/php/profile.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n <head>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/wp-content/themes/justwrite/style.css\">\n </head>\n<style>\n.axis path,\n.axis line {\n fill: none;\n stroke: #000;\n shape-rendering: crispEdges;\n}\n\n.x.axis path {\n display: none;\n}\n\n.line {\n fill: none;\n stroke: steelblue;\n stroke-width: 1.5px;\n}\n\ndiv.tooltip {\n position: absolute;\t\n text-align: center;\t\n width: 125px;\t\n height: autp;\t\t\n padding: 2px;\t\n font: 12px sans-serif;\t\n background: lightsteelblue;\t\n border: 0px;\t\t\t\t\t\n border-radius: 8px;\n /* pointer-events: none;\tThis line needs to be removed */\n\t\n}\n\n.bigbox { \n float: left;\n width: 250px;\n height: 150px;\n margin: 5px 5px 5px 5px;\n padding: 5px;\n border-width: 3px;\n border-style: solid;\n border-color: rgba(0,0,0,.2);\n text-align: center;\n vertical-align: middle;\n color: white;\n}\n\nbody {\n background-color: #eaeaea;\n}\n\n</style> \n<body>\n<?php include 'profilephp.php'?>\n<div id=\"barContainer\"></div>\n<div id=\"graphContainer\" style=\"clear:both\"></div>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js\"></script>\n<script src=\"elograph.js\"></script>\n<script src=\"winloss.js\"></script>\n</body>\n</html> " }, { "alpha_fraction": 0.5075488686561584, "alphanum_fraction": 0.5226465463638306, "avg_line_length": 22.715789794921875, "blob_id": 
"a89ca0e308be5054ad24cee11027d61f8fe5eac1", "content_id": "84e8e6fcabbd044fdf20bfb6961d912f496c469f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2252, "license_type": "no_license", "max_line_length": 191, "num_lines": 95, "path": "/php/profilephp.php", "repo_name": "KaiserKyle/Kayfabermetrics", "src_encoding": "UTF-8", "text": "<?php\nerror_reporting(E_ALL ^ E_NOTICE);\ndate_default_timezone_set('UTC');\n\n// Get ID from page\n$id = 1134;\n$id = $_GET['ID'];\n$name = '';\n$othernames = '';\n$currentELO = 0;\n$lastMatch = '';\n$nummatches = 0;\n$wins = 0;\n$losses = 0;\n$draws = 0;\n\ninclude 'sqldata.php';\n\n$usertable=\"wrestlers_temp\";\n$elotable=\"ELO_ratings\";\n\n$link = mysqli_connect($hostname,$username, $password, $dbname);\n\n# Check If Record Exists\n\n$query = \"SELECT * FROM $usertable WHERE ID = $id\";\n//echo $query;\n$result = mysqli_query($link, $query);\n\nif($result)\n{\n while($row = mysqli_fetch_array($result))\n {\n if ($row[\"IsPrimary\"] == 1)\n {\n $name = $row[\"Name\"];\n }\n else\n {\n $othernames += $row[\"Name\"] + '; ';\n }\n }\n}\n\n$query = \"SELECT * FROM $elotable WHERE wrestlerid = $id ORDER BY epochtime DESC LIMIT 10\";\n$result = mysqli_query($link, $query);\nif ($result)\n{\n while($row = mysqli_fetch_array($result))\n {\n if (0 == $currentELO)\n {\n $currentELO = round($row['rating'], 0);\n }\n if ('' == $lastMatch)\n {\n $lastMatch = date(\"F j, Y\", $row['epochtime']);\n }\n }\n}\n\n$query = \"SELECT result, COUNT(*) AS count FROM $elotable WHERE wrestlerid = $id GROUP BY result\";\n$result = mysqli_query($link, $query);\nif ($result)\n{\n while($row = mysqli_fetch_array($result))\n {\n $nummatches += $row[1];\n if (\"W\" == $row[0])\n {\n $wins = $row[1];\n }\n else if (\"L\" == $row[0])\n {\n $losses = $row[1];\n }\n else\n {\n $draws = $row[1];\n }\n }\n}\n\necho \"<div class=\\\"page-template-normal single-template-1\\\">\";\necho \"<h2 class=\\\"title\\\">\" 
. $name . \"</h2>\";\nif ('' != $othernames)\n{\n echo \"Also known as: \" . $othernames;\n}\necho \"<h2 class = \\\"title\\\">\" . $wins . \"-\" . $losses . \"-\" . $draws . \"</h2>\";\necho \"<span>Last Match: \" . $lastMatch . \"</span><br>\";\necho \"<span>Total number of matches: \" . $nummatches . \"</span><br>\";\necho \"</div>\";\necho \"<div class=\\\"bigbox\\\" style=\\\"background-color:#3300CC;\\\">Current Elo Rating:<br><span style=\\\"font-size:75px;display:inline-block;line-height:150px\\\">\" . $currentELO . \"</span></div>\";\n?>" } ]
17
AugustusUkiyo/scrapping-open-food
https://github.com/AugustusUkiyo/scrapping-open-food
32f2b38649d6963a4cee20c7c3a4c513a5868983
74b95f45552245fcf5cfcd186fdc5a55e5fdff91
034b6f25d525417afa183c12308408eff267d700
refs/heads/main
2023-05-08T02:48:13.127194
2021-05-24T13:11:33
2021-05-24T13:11:33
366,861,481
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4970930218696594, "alphanum_fraction": 0.5116279125213623, "avg_line_length": 27.66666603088379, "blob_id": "bbc440495eb59c57a2beb38edff2a016a282bf76", "content_id": "0af065c9cb86dbd76560dd96ac57bc8bec05ac61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/edit.py", "repo_name": "AugustusUkiyo/scrapping-open-food", "src_encoding": "UTF-8", "text": "with open('data.csv', \"r\", encoding='utf-8') as fh:\n lines = fh.readlines()\nwith open('new_data.csv', \"w\", encoding='utf-8') as fh:\n #lines = fh.readlines()\n count = 0\n for line in lines:\n if not line:\n break\n \n line_list = line.split(';')\n if len(line_list) == 25:\n fh.write(line)\n" }, { "alpha_fraction": 0.6126082539558411, "alphanum_fraction": 0.6244233846664429, "avg_line_length": 39.51803207397461, "blob_id": "d39965741ec4d812007ceb16dca22ac81b92f577", "content_id": "6ff80cecf8b410331566bd1aaab0dce397d41548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12366, "license_type": "no_license", "max_line_length": 185, "num_lines": 305, "path": "/scrappy_food.py", "repo_name": "AugustusUkiyo/scrapping-open-food", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nimport time\nimport concurrent.futures\n\nMAX_THREADS = 4\n\n########## Helper functions\n\n# Get score\ndef get_score(url_produit):\n \"\"\"\n Helper function allow to get score with selenimum API \n ['Nutri_Score', 'NOVA', 'Eco_Score']\n \"\"\"\n DRIVER_PATH = './chromedriver'\n driver = webdriver.Chrome(executable_path=DRIVER_PATH)\n driver.get(url_produit)\n el = driver.find_elements_by_xpath(\"//div[@id='product_summary']//h4\")\n Nutri_Score = el[0].text[12:]\n NOVA = el[1].text[5:]\n Eco_Score = el[2].text[10:]\n driver.close()\n 
return Nutri_Score.replace('\\n', ''), NOVA.replace('\\n', ''), Eco_Score.replace('\\n', '')\n\n# get characteristic produit\ndef caracteristic_du_produit(soup):\n \"\"\"\n \n list_caracteristic_produit = ['Quantite', 'Conditionnement', 'Marques', 'Categories', 'Labels_certifications_recompenses',\n 'Origine_des_ingredient', 'Lieux_de_fabrication_ou_de_transformation', 'Code_de_tracabilite',\n 'Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant', 'Magasins', 'Pays_de_vente']\n \"\"\"\n characteristic_produit = soup.find('div',{'class':\"medium-12 large-8 xlarge-8 xxlarge-8 columns\"})\n o_list_characteristic_produit = characteristic_produit.find_all(\"p\")[1:]\n list_characteristic_produit = [elem.text.replace('\\xa0', '') for elem in o_list_characteristic_produit]\n \n # Quantite\n current_index = 0\n if 'Quantitรฉ' in list_characteristic_produit[current_index]:\n index_0 = list_characteristic_produit[current_index].index(':')\n Quantite = list_characteristic_produit[current_index][index_0 + 2:]\n current_index += 1\n else:\n Quantite = ''\n \n # Conditionnement\n if 'Conditionnement' in list_characteristic_produit[current_index]:\n index_1 = list_characteristic_produit[current_index].index(':')\n Conditionnement = list_characteristic_produit[current_index][index_1 + 2:]\n current_index += 1\n else:\n Conditionnement = ''\n \n # Marques\n if 'Marques' in list_characteristic_produit[current_index]:\n index_2 = list_characteristic_produit[current_index].index(':')\n Marques = list_characteristic_produit[current_index][index_2 + 2:]\n current_index += 1\n else:\n Marques = ''\n \n # Categories\n if 'Catรฉgories' in list_characteristic_produit[current_index]:\n index_3 = list_characteristic_produit[current_index].index(':')\n Categories = list_characteristic_produit[current_index][index_3 + 2:]\n current_index += 1\n else:\n Categories = ''\n \n # Labels_certifications_recompenses\n if 'Labels, certifications, rรฉcompenses' in 
list_characteristic_produit[current_index]:\n index_4 = list_characteristic_produit[current_index].index(':')\n Labels_certifications_recompenses = list_characteristic_produit[current_index][index_4 + 2:]\n current_index += 1\n else:\n Labels_certifications_recompenses = ''\n \n # Origine_des_ingredient\n if 'Origine des ingrรฉdients' in list_characteristic_produit[current_index]:\n index_5 = list_characteristic_produit[current_index].index(':')\n Origine_des_ingredient = list_characteristic_produit[current_index][index_5 + 2:]\n current_index += 1\n else:\n Origine_des_ingredient = ''\n \n # Lieux_de_fabrication_ou_de_transformation\n if 'Lieux de fabrication ou de transformation' in list_characteristic_produit[current_index]:\n index_6 = list_characteristic_produit[current_index].index(':')\n Lieux_de_fabrication_ou_de_transformation = list_characteristic_produit[current_index][index_6 + 2:]\n current_index += 1\n else:\n Lieux_de_fabrication_ou_de_transformation = ''\n \n # Code_de_tracabilite\n if 'Code de traรงabilitรฉ' in list_characteristic_produit[current_index]:\n index_7 = list_characteristic_produit[current_index].index(':')\n Code_de_tracabilite = list_characteristic_produit[current_index][index_7 + 2:]\n current_index += 1\n else:\n Code_de_tracabilite = ''\n \n # Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant \n if 'Lien vers la page du produit sur le site officiel du fabricant' in list_characteristic_produit[current_index]:\n index_8 = list_characteristic_produit[current_index].index(':')\n Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant = list_characteristic_produit[current_index][index_8 + 2:]\n current_index += 1\n else:\n Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant = ''\n \n # Magasins\n if 'Magasins' in list_characteristic_produit[current_index]:\n index_9 = list_characteristic_produit[current_index].index(':')\n Magasins = list_characteristic_produit[current_index][index_9 + 2:]\n current_index 
+= 1\n else:\n Magasins = ''\n \n # Pays_de_vente\n if 'Pays de vente' in list_characteristic_produit[current_index]:\n index_10 = list_characteristic_produit[current_index].index(':')\n Pays_de_vente = list_characteristic_produit[current_index][index_10 + 2:]\n else:\n Pays_de_vente = ''\n \n return Quantite, Conditionnement.replace('\\n', ''), Marques.replace('\\n', ''), Categories.replace('\\n', ''), Labels_certifications_recompenses.replace('\\n', ''),\\\n Origine_des_ingredient.replace('\\n', ''), Lieux_de_fabrication_ou_de_transformation.replace('\\n', ''), Code_de_tracabilite.replace('\\n', ''),\\\n Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant.replace('\\n', ''), Magasins, Pays_de_vente.replace('\\n', '')\n\n# ingredients_analysis\ndef get_ingredients_analysis(soup):\n \"\"\"\n ingredients_analysis\n \"\"\"\n l_s = []\n if soup.find('p', {'id' : \"ingredients_analysis\"}) is None:\n return l_s\n else:\n for string in soup.find('p', {'id' : \"ingredients_analysis\"}).find_all('span')[:-1]:\n if string.text != '':\n n_string = string.text.replace('\\n', '')\n l_s.append(n_string.replace('\\t', ''))\n return l_s\n \n# reperes nutritionnels\ndef get_repere_nutrition(soup):\n \"\"\"\n \n \"\"\"\n l_ingredient = []\n ingredients = [\"grasses\", \"Acides\", \"Sucres\", \"Sel\"]\n try:\n for ingredient in soup.find_all('div', {'class':\"small-12 xlarge-6 columns\"})[1].text.replace('\\n', ';').split(';'):\n if \"grasses\" in ingredient or \"Acides\" in ingredient or \"Sucres\" in ingredient or \"Sel\" in ingredient:\n l_ingredient.append(ingredient)\n # Matiรจres grasses / Lipides\n current_index = 0\n # Matiรจres grasses / Lipides\n try:\n index_0 = l_ingredient[current_index].index('M')\n Grasses = l_ingredient[current_index][1:index_0 - 1]\n current_index += 1\n except:\n Grasses = ''\n # Acides gras saturรฉs\n try:\n index_1 = l_ingredient[current_index].index('A')\n Acides = l_ingredient[current_index][1:index_1 - 1]\n current_index += 1\n 
except:\n Acides = ''\n # Sucres\n try:\n index_2 = l_ingredient[current_index].index('S')\n Sucres = l_ingredient[current_index][1:index_2 - 1]\n current_index += 1\n except:\n Sucres = ''\n # Sel\n try:\n index_3 = l_ingredient[current_index].index('S')\n Sel = l_ingredient[current_index][1:index_3 - 1]\n except:\n Sel = ''\n except:\n Grasses, Acides, Sucres, Sel = '', '', '' , ''\n return Grasses.replace('\\n', ''), Acides.replace('\\n', ''), Sucres.replace('\\n', ''), Sel.replace('\\n', '')\n\n# Informations nutritionnelles\ndef get_info_nutri(soup):\n \"\"\"\"\"\"\n try:\n Energie_kJ = soup.find_all('tr', {'id':\"nutriment_energy-kj_tr\"})[0].find_all('td', {'class':\"nutriment_value\"})[0].text.replace('\\n','').replace('\\t','').replace('\\xa0','')\n except:\n Energie_kJ = ''\n try :\n Energie_kcal = soup.find_all('tr', {'id':\"nutriment_energy-kcal_tr\"})[0].find_all('td', {'class':\"nutriment_value\"})[0].text.replace('\\n','').replace('\\t','').replace('\\xa0','')\n except:\n Energie_kcal = ''\n return Energie_kJ.replace('\\n', ''), Energie_kcal.replace('\\n', '')\n\n# Impact environnemental\ndef get_impact_environnemental(soup):\n \"\"\"\n \n \"\"\"\n impact = ''\n for elem in soup.find_all('img', {'style':\"margin-bottom:1rem;max-width:100%\"}):\n if 'Eco-score' in elem['alt']:\n impact = elem['alt'][9:].replace(' ', '')\n return impact.replace('\\n', '')\n\n########## Core Functions\ndef get_page(start=1,end=7981):\n \"\"\"\n time execution: 0m0,883s\n \"\"\"\n list_pages = []\n for i in range(start,end):\n list_pages.append('https://fr.openfoodfacts.org/'+str(i))\n return list_pages\n\ndef get_list_produits_page(url):\n \"\"\"\n time execution: 0m1,195s\n \"\"\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n list_produits = soup.find(\"div\", { 'id' : \"search_results\" })\n list_produit_href = [elem['href'] for elem in list_produits.find_all('a', href=True)]\n #time.sleep(0.1)\n return list_produit_href\n\n\ndef 
get_produit(url_produit):\n \"\"\"\n time execution: 0m3,005s\n \"\"\"\n url = 'https://fr.openfoodfacts.org/'+url_produit\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n # Name produit\n name = soup.find('h1', {'itemprop' : \"name\"}).text.replace('\\xa0', ' ').replace('\\n', '')\n # Code barre\n code_barre = soup.find('span', {'id' : \"barcode\"}).text.replace('\\n', '') \n # Nutri_Score, NOVA, Eco_Score\n Nutri_Score, NOVA, Eco_Score = get_score(url)\n # Characteristic produit\n Quantite, Conditionnement, Marques, Categories, Labels_certifications_recompenses, \\\n Origine_des_ingredient, Lieux_de_fabrication_ou_de_transformation, Code_de_tracabilite, \\\n Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant, Magasins, Pays_de_vente = caracteristic_du_produit(soup)\n # ingredients\n ingredients = ', '.join(get_ingredients_analysis(soup))\n # reperes nutritionnels\n Grasses, Acides, Sucres, Sel = get_repere_nutrition(soup)\n # categories cochees\n if soup.find('label', {'style' : \"display:inline;font-size:1rem;\"}) is None:\n Categories_cochees = ''\n else:\n Categories_cochees = soup.find('label', {'style' : \"display:inline;font-size:1rem;\"}).text.replace('\\n', '').replace('\\t', '')\n # Informations nutritionnelles\n Energie_kJ, Energie_kcal = get_info_nutri(soup)\n # Impact environnemental\n Impact_environnemental = get_impact_environnemental(soup)\n line = ';'.join([name, code_barre, Nutri_Score, NOVA, Eco_Score, Quantite, Conditionnement, Marques, Categories, \\\n Labels_certifications_recompenses, Origine_des_ingredient, Lieux_de_fabrication_ou_de_transformation, \\\n Code_de_tracabilite, Lien_vers_la_page_du_produit_sur_le_site_officiel_du_fabricant, Magasins, Pays_de_vente, \\\n ingredients, Grasses, Acides, Sucres, Sel, Categories_cochees, Energie_kJ, Energie_kcal, Impact_environnemental])\n \n with open('data.csv', \"a\", encoding='utf-8') as fh:\n fh.write(line + '\\n')\n\ndef download_produit(story_urls):\n 
#threads = min(MAX_THREADS, len(story_urls))\n #max_workers=threads\n with concurrent.futures.ThreadPoolExecutor() as executor:\n executor.map(get_produit, story_urls)\n\ndef main():\n pages = get_page(start=13,end=20)\n # print(pages)\n list_produit = []\n \n # for page in pages[:10]:\n # list_produit.append(get_list_produits_page(url=page))\n with concurrent.futures.ThreadPoolExecutor() as executor:\n list_produit = executor.map(get_list_produits_page, pages)\n # #print(list_produit)\n # for elem in list_produit:\n # download_produit(elem)\n # print(len(list_produit))\n #print(list_produit)\n for p in list_produit:\n download_produit(p)\n \n # download_produit(list_produit)\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n print('Duration: {}'.format(time.time() - start_time))\n\n#print(get_produit(url_produit='produit/3017620422003/nutella-ferrero'))" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "b9f04132255dae0beb6cf9aedb7e543c4b76d9f8", "content_id": "d71571764222bcbe074fd5647c9e4b36026ed4f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "AugustusUkiyo/scrapping-open-food", "src_encoding": "UTF-8", "text": "# scrapping-open-food\nread open food pdf file\n" } ]
3
wnd2da/wavve
https://github.com/wnd2da/wavve
b8fc707a1242705a2255d18d86014c3f4207d27a
059e82faa460177fdc951ee9c04d3759bd8d2a47
4a621350c6556f6cb0160cb8ad890db38c25cdcf
refs/heads/master
2022-12-05T20:54:49.873221
2020-08-23T05:41:26
2020-08-23T05:41:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5591906905174255, "alphanum_fraction": 0.5607691407203674, "avg_line_length": 32.6716423034668, "blob_id": "072b18cae84d0a3624e73658a0f076a03cdd3ac3", "content_id": "6b83e0469b7083be338d021c04b4f5b7b26990b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6995, "license_type": "no_license", "max_line_length": 115, "num_lines": 201, "path": "/model.py", "repo_name": "wnd2da/wavve", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n#########################################################\r\n# python\r\nimport os\r\nimport traceback\r\nfrom datetime import datetime\r\nimport json\r\n\r\n# third-party\r\nfrom sqlalchemy import or_, and_, func, not_\r\nfrom sqlalchemy.orm import backref\r\n\r\n# sjva ๊ณต์šฉ\r\nfrom framework import db, path_app_root, app\r\nfrom framework.util import Util\r\n\r\n# ํŒจํ‚ค์ง€\r\nfrom .plugin import logger, package_name\r\n#########################################################\r\n\r\n\r\ndb_file = os.path.join(path_app_root, 'data', 'db', '%s.db' % package_name)\r\napp.config['SQLALCHEMY_BINDS'][package_name] = 'sqlite:///%s' % (db_file)\r\n\r\nclass ModelSetting(db.Model):\r\n __tablename__ = 'plugin_%s_setting' % package_name\r\n __table_args__ = {'mysql_collate': 'utf8_general_ci'}\r\n __bind_key__ = package_name\r\n\r\n id = db.Column(db.Integer, primary_key=True)\r\n key = db.Column(db.String(100), unique=True, nullable=False)\r\n value = db.Column(db.String, nullable=False)\r\n \r\n def __init__(self, key, value):\r\n self.key = key\r\n self.value = value\r\n\r\n def __repr__(self):\r\n return repr(self.as_dict())\r\n\r\n def as_dict(self):\r\n return {x.name: getattr(self, x.name) for x in self.__table__.columns}\r\n\r\n @staticmethod\r\n def get(key):\r\n try:\r\n return db.session.query(ModelSetting).filter_by(key=key).first().value.strip()\r\n except Exception as e:\r\n logger.error('Exception:%s %s', e, key)\r\n 
logger.error(traceback.format_exc())\r\n \r\n \r\n @staticmethod\r\n def get_int(key):\r\n try:\r\n return int(ModelSetting.get(key))\r\n except Exception as e:\r\n logger.error('Exception:%s %s', e, key)\r\n logger.error(traceback.format_exc())\r\n \r\n @staticmethod\r\n def get_bool(key):\r\n try:\r\n return (ModelSetting.get(key) == 'True')\r\n except Exception as e:\r\n logger.error('Exception:%s %s', e, key)\r\n logger.error(traceback.format_exc())\r\n\r\n @staticmethod\r\n def set(key, value):\r\n try:\r\n item = db.session.query(ModelSetting).filter_by(key=key).with_for_update().first()\r\n if item is not None:\r\n item.value = value.strip()\r\n db.session.commit()\r\n else:\r\n db.session.add(ModelSetting(key, value.strip()))\r\n except Exception as e:\r\n logger.error('Exception:%s %s', e, key)\r\n logger.error(traceback.format_exc())\r\n\r\n @staticmethod\r\n def to_dict():\r\n try:\r\n ret = Util.db_list_to_dict(db.session.query(ModelSetting).all())\r\n ret['package_name'] = package_name\r\n return ret \r\n except Exception as e:\r\n logger.error('Exception:%s ', e)\r\n logger.error(traceback.format_exc())\r\n\r\n\r\n @staticmethod\r\n def setting_save(req):\r\n try:\r\n for key, value in req.form.items():\r\n logger.debug('Key:%s Value:%s', key, value)\r\n if key in ['scheduler', 'is_running']:\r\n continue\r\n entity = db.session.query(ModelSetting).filter_by(key=key).with_for_update().first()\r\n entity.value = value\r\n db.session.commit()\r\n return True \r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n logger.debug('Error Key:%s Value:%s', key, value)\r\n return False\r\n\r\n#########################################################\r\n\r\n\r\nclass ModelWavveEpisode(db.Model):\r\n __tablename__ = 'plugin_%s_auto_episode' % package_name\r\n __table_args__ = {'mysql_collate': 'utf8_general_ci'}\r\n __bind_key__ = package_name\r\n\r\n id = db.Column(db.Integer, primary_key=True)\r\n 
contents_json = db.Column(db.JSON)\r\n streaming_json = db.Column(db.JSON)\r\n created_time = db.Column(db.DateTime)\r\n\r\n channelname = db.Column(db.String)\r\n \r\n programid = db.Column(db.String)\r\n programtitle = db.Column(db.String)\r\n \r\n contentid = db.Column(db.String)\r\n releasedate = db.Column(db.String)\r\n episodenumber = db.Column(db.String)\r\n episodetitle = db.Column(db.String)\r\n quality = db.Column(db.String)\r\n\r\n vod_type = db.Column(db.String) #general onair\r\n image = db.Column(db.String)\r\n playurl = db.Column(db.String)\r\n \r\n filename = db.Column(db.String)\r\n duration = db.Column(db.Integer)\r\n start_time = db.Column(db.DateTime)\r\n end_time = db.Column(db.DateTime)\r\n download_time = db.Column(db.Integer)\r\n completed = db.Column(db.Boolean)\r\n user_abort = db.Column(db.Boolean)\r\n pf_abort = db.Column(db.Boolean)\r\n etc_abort = db.Column(db.Integer) #ffmpeg ์›์ธ 1, ์ฑ„๋„, ํ”„๋กœ๊ทธ๋žจ\r\n ffmpeg_status = db.Column(db.Integer)\r\n temp_path = db.Column(db.String)\r\n save_path = db.Column(db.String)\r\n pf = db.Column(db.Integer)\r\n retry = db.Column(db.Integer)\r\n filesize = db.Column(db.Integer)\r\n filesize_str = db.Column(db.String)\r\n download_speed = db.Column(db.String)\r\n call = db.Column(db.String)\r\n\r\n def __init__(self, call, info, streaming):\r\n self.created_time = datetime.now()\r\n self.completed = False\r\n self.user_abort = False\r\n self.pf_abort = False\r\n self.etc_abort = 0\r\n self.ffmpeg_status = -1\r\n self.pf = 0\r\n self.retry = 0\r\n self.call = call\r\n self.set_info(info)\r\n self.set_streaming(streaming)\r\n\r\n\r\n def __repr__(self):\r\n #return \"<Episode(id:%s, episode_code:%s, quality:%s)>\" % (self.id, self.episode_code, self.quality)\r\n return repr(self.as_dict())\r\n\r\n def as_dict(self):\r\n ret = {x.name: getattr(self, x.name) for x in self.__table__.columns}\r\n ret['created_time'] = self.created_time.strftime('%m-%d %H:%M:%S') if self.created_time is not None else 
''\r\n ret['start_time'] = self.start_time.strftime('%m-%d %H:%M:%S') if self.start_time is not None else ''\r\n ret['end_time'] = self.end_time.strftime('%m-%d %H:%M:%S') if self.end_time is not None else ''\r\n return ret\r\n\r\n def set_info(self, data):\r\n self.contents_json = data\r\n self.channelname = data['channelname']\r\n \r\n self.programid = data['programid']\r\n self.programtitle = data['programtitle']\r\n \r\n self.contentid = data['contentid']\r\n self.releasedate = data['releasedate']\r\n self.episodenumber = data['episodenumber']\r\n self.episodetitle = data['episodetitle']\r\n self.image = 'https://' + data['image']\r\n self.vod_type = data['type']\r\n\r\n def set_streaming(self, data):\r\n self.streaming_json = data\r\n self.playurl = data['playurl']\r\n import framework.wavve.api as Wavve\r\n self.filename = Wavve.get_filename(self.contents_json, data['quality'])\r\n self.quality = data['quality']\r\n" } ]
1
vgamula/sp
https://github.com/vgamula/sp
492279e21716c3afd162553f6d5d78ba89a8f25a
16e1dcbf15c1a76d44b15bc914a001167a43c05e
539693374bf3de4999771e8fdc280729b93ed6f6
refs/heads/master
2021-01-19T04:12:41.540915
2017-04-18T17:44:33
2017-04-18T17:44:33
75,880,210
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7118194103240967, "alphanum_fraction": 0.7184594869613647, "avg_line_length": 26.381818771362305, "blob_id": "c76d4d0d41bb582988942f892fb18e374b972794", "content_id": "1d294eae46d0d1348bd21452809921340e4726c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1506, "license_type": "permissive", "max_line_length": 84, "num_lines": 55, "path": "/server/main.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import asyncio\nimport pathlib\n\nimport uvloop\nimport jinja2\nfrom aiohttp import web\nfrom aiohttp_session import setup as session_setup\nfrom aiohttp_session.cookie_storage import EncryptedCookieStorage\nfrom aiohttp_jinja2 import setup as jinja2_setup\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\nfrom .core.middlewares import error_pages\nfrom .routes import make_routes\nfrom . import settings\n\nPROJECT_ROOT = pathlib.Path(__file__).parent.parent\nSERVER_ROOT = PROJECT_ROOT / 'server'\n\n\nasync def handle(request: web.Request):\n return web.Response(text='Hello, World!')\n\n\ndef make_app(loop: asyncio.AbstractEventLoop=None) -> web.Application:\n middlewares = [error_pages()]\n app = web.Application(middlewares=middlewares, debug=settings.DEBUG)\n app._set_loop(loop)\n\n app['settings'] = settings\n\n # Session setup\n session_setup(app, EncryptedCookieStorage(settings.SECRET_KEY))\n\n # DB setup\n client = AsyncIOMotorClient(settings.DATABASE_URL)\n app['db'] = client.sp\n\n # Templates setup\n jinja2_setup(\n app,\n loader=jinja2.FileSystemLoader(str(SERVER_ROOT / 'templates')),\n extensions=['server.core.webpack_loader.contrib.jinja2ext.WebpackExtension']\n )\n\n make_routes(app, str(SERVER_ROOT / 'static'))\n app.router.add_get('/', handle)\n return app\n\n\n\nif __name__ == '__main__':\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n loop = asyncio.get_event_loop()\n app = make_app(loop=loop)\n web.run_app(app, port=8888)\n" }, { 
"alpha_fraction": 0.7215363383293152, "alphanum_fraction": 0.7242798209190369, "avg_line_length": 23.299999237060547, "blob_id": "42ba9a933b869fefe94b01b20f379411f5e5d86c", "content_id": "d7da3449c4065b52449c11fc2655ff6b0a4be363", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 729, "license_type": "permissive", "max_line_length": 89, "num_lines": 30, "path": "/Makefile", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "WEBPACK=./node_modules/.bin/webpack\nWEBPACK_DASHBOARD=./node_modules/.bin/webpack-dashboard\n\n\nrun:\n\tPYTHONASYNCIODEBUG=1 python3 -m server.main\n\nwatch: build_dll\n\t${WEBPACK_DASHBOARD} -- node webpack-configs/server.js\n\nbuild: clean build_dll_prod\n\t${WEBPACK} -p --config webpack-configs/production.js --progress --colors\n\nbuild_dll:\n\t${WEBPACK} --config webpack-configs/vendor.js --progress --colors\n\nbuild_dll_prod:\n\tNODE_ENV=production ${WEBPACK} -p --config webpack-configs/vendor.js --progress --colors\n\nlint:\n\t./node_modules/.bin/eslint client/\n\ntest:\n\tpy.test -x -s\n\nclean:\n\tfind . 
| grep -E \"(__pycache__|\\.pyc|\\.pyo)\" | xargs rm -rf\n\trm -rf server/static/dist\n\tmkdir server/static/dist\n\ttouch server/static/dist/.gitkeep\n" }, { "alpha_fraction": 0.6708074808120728, "alphanum_fraction": 0.7204968929290771, "avg_line_length": 19.125, "blob_id": "f8e27f329acc3054c902e6f51e6cd76e13571482", "content_id": "696c4601ea55211ac5f54abe872689c51ebf5184", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "permissive", "max_line_length": 44, "num_lines": 16, "path": "/server/core/views.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp import web\nfrom aiohttp_jinja2 import template\n\n\n@template('core/403.jinja2')\nasync def handle_403(request: web.Request):\n return {}\n\n\n@template('core/404.jinja2')\nasync def handle_404(request: web.Request):\n return {}\n\n@template('app.jinja2')\nasync def application(request: web.Request):\n return {}\n" }, { "alpha_fraction": 0.5366379022598267, "alphanum_fraction": 0.5495689511299133, "avg_line_length": 33.37036895751953, "blob_id": "c0442221ce2d7dd367574bb9603a33c41fcc13d1", "content_id": "e2133c33847319c3742f605fc290ab2d18fe46b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "permissive", "max_line_length": 63, "num_lines": 27, "path": "/server/core/middlewares.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp import web\nfrom . 
import views\n\n\ndef error_pages():\n overrides = {\n 403: views.handle_403,\n 404: views.handle_404,\n }\n async def middleware(app, handler):\n async def middleware_handler(request):\n is_api_request = request.headers.get('API-Request')\n try:\n response = await handler(request)\n override = overrides.get(response.status)\n if override is not None and not is_api_request:\n return await override(request, response)\n else:\n return response\n except web.HTTPException as ex:\n override = overrides.get(ex.status)\n if override is not None and not is_api_request:\n return await override(request)\n else:\n raise ex\n return middleware_handler\n return middleware\n" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.5918367505073547, "avg_line_length": 20, "blob_id": "7c4ac8c8775e3dcb94af01d478354881699a54fe", "content_id": "90ba8a0e4e1ee9eef3410d1edb86d30d10971aa1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 147, "license_type": "permissive", "max_line_length": 51, "num_lines": 7, "path": "/client/reloadable.js", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import React from 'react'\n\nexport const Test = () => ( // eslint-disable-line\n <div>\n This is an example of hot reloading.\n </div>\n)\n" }, { "alpha_fraction": 0.5707788467407227, "alphanum_fraction": 0.575895369052887, "avg_line_length": 29.327587127685547, "blob_id": "83b3bd3e1da42ba1aca3dc3ec11d28ffffadafc8", "content_id": "dbc32ac0b97b14e305822465e318ba6b832a9be2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1759, "license_type": "permissive", "max_line_length": 75, "num_lines": 58, "path": "/server/accounts/forms.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import trafaret as t\n\nfrom server.core.passwords import generate_password, check_password\nfrom server.core.forms import TrafaretForm, 
TrafaretError\n\n\nclass RegistrationForm(TrafaretForm):\n fields = t.Dict({\n t.Key('email'): t.Email(),\n t.Key('password'): t.String(max_length=255),\n t.Key('confirm'): t.String(max_length=255),\n t.Key('accept_tos'): t.StrBool(),\n })\n\n async def extra_validation(self):\n errors = {}\n if self.data['confirm'] != self.data['password']:\n errors['confirm'] = 'Passwords should match.'\n\n if await self.db.users.find_one({'email': self.data['email']}):\n errors['email'] = 'User with this email is already registered.'\n\n if errors:\n raise TrafaretError(errors)\n\n async def save(self):\n data = self.data\n data_to_save = {\n 'email': data['email'],\n 'password': generate_password(data['password']),\n }\n result = await self.db.users.insert_one(data_to_save)\n data_to_save['_id'] = result.inserted_id\n return data_to_save\n\n\nclass LoginForm(TrafaretForm):\n user = None\n\n fields = t.Dict({\n t.Key('email'): t.Email(),\n t.Key('password'): t.String(max_length=255),\n })\n\n async def extra_validation(self):\n errors = {}\n user = await self.db.users.find_one({'email': self.data['email']})\n if not user:\n errors['email'] = 'User not found'\n else:\n if not check_password(self.data['password'], user.password):\n errors['password'] = 'Password is not correct'\n self.user = user\n if errors:\n raise TrafaretError(errors)\n\n def get_user(self):\n return self.user\n" }, { "alpha_fraction": 0.4874371886253357, "alphanum_fraction": 0.4874371886253357, "avg_line_length": 17.090909957885742, "blob_id": "b492315dc232c779a6fc43ea51b71e14bd181272", "content_id": "e41247a1f7d6156b76a66de9af9933547886a744", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 199, "license_type": "permissive", "max_line_length": 41, "num_lines": 11, "path": "/client/app.js", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport { Test } from 'reloadable'\n\nexport default function 
Hello({ name }) {\n return (\n <div>\n Hey, {name}!\n <Test />\n </div>\n )\n}\n" }, { "alpha_fraction": 0.5840708017349243, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 26.056337356567383, "blob_id": "1bb30016c5d7bd8e3f0fae28ca9fa485212a839d", "content_id": "41e7edeae4efe16fc7c5af8aa5ac775e4e3cc1fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1921, "license_type": "permissive", "max_line_length": 118, "num_lines": 71, "path": "/webpack-configs/development.js", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "/* eslint-disable */\n\nconst path = require('path');\nconst webpack = require('webpack');\nconst BundleTracker = require('webpack-bundle-tracker');\nconst AnyBarWebpackPlugin = require('anybar-webpack');\nrequire('babel-polyfill');\n\nconst projectPath = path.resolve(__dirname, '..');\nconst sourcePath = './client/';\nconst sourceFilePath = './client/index';\nconst destinationPath = './server/static/dist/';\nconst statsFile = destinationPath + 'stats.json';\nconst vendorManifestFile = destinationPath + 'vendor-manifest.json';\nconst publicPath = 'http://localhost:3000/static/dist/';\n\nconst manifest = require('.' 
+ vendorManifestFile);\n\n\nmodule.exports = {\n context: projectPath,\n\n entry: {\n main: [\n 'babel-polyfill',\n 'webpack-dev-server/client?http://localhost:3000',\n 'webpack/hot/only-dev-server',\n sourceFilePath,\n ]\n },\n\n output: {\n path: path.join(projectPath, destinationPath),\n filename: '[name]-[hash].js',\n publicPath: publicPath,\n },\n\n plugins: [\n new webpack.NoErrorsPlugin(),\n new webpack.HotModuleReplacementPlugin(),\n new webpack.DllReferencePlugin({\n name: 'vendor',\n context: sourcePath,\n manifest: manifest,\n }),\n new BundleTracker({filename: statsFile}),\n new AnyBarWebpackPlugin(),\n ],\n\n module: {\n loaders: [\n {test: /\\.js$/, exclude: /node_modules/, loaders: ['react-hot-loader/webpack', 'babel', 'eslint-loader']},\n ]\n },\n\n resolve: {\n root: [\n path.join(projectPath, sourcePath),\n ],\n modulesDirectories: ['node_modules'],\n extensions: ['', '.js']\n },\n\n resolveLoader: {\n modulesDirectories: ['node_modules'],\n moduleTemplates: ['*-loader', '*'],\n extensions: ['', '.js']\n },\n watch: true,\n devtool: 'source-map',\n};\n" }, { "alpha_fraction": 0.7404580116271973, "alphanum_fraction": 0.7404580116271973, "avg_line_length": 22.81818199157715, "blob_id": "cd87059751992c54602fab970edc2f0fcc44cd2d", "content_id": "dfb9acc9821d56c2f770524f89090785bc33184d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "permissive", "max_line_length": 65, "num_lines": 11, "path": "/server/tests/__init__.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop\n\nfrom server.main import make_app\n\n\nclass BaseAsyncTestCase(AioHTTPTestCase):\n async def get_application(self):\n return make_app()\n\n\n__all__ = ['unittest_run_loop', 'BaseAsyncTestCase']\n" }, { "alpha_fraction": 0.5993537902832031, "alphanum_fraction": 0.6074313521385193, "avg_line_length": 23.760000228881836, 
"blob_id": "62d7dae253b3315dd2e2fcf26d6d2805187f22e6", "content_id": "a0b70d40d6492773b602103dc5d8405bf0882bc8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "permissive", "max_line_length": 97, "num_lines": 25, "path": "/server/settings/__init__.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import os\nfrom pathlib import Path\n\nget = os.environ.get\n\nDEBUG = False\nENVIRONMENT = get('ENV', 'production') # development, testing\nDATABASE_URL = get('DATABASE_URL', 'mongodb://localhost:27017')\nSECRET_KEY = get('SECRET_KEY', '109la0m3tK8ErcOJGJNqkQTU-KdvEqw8oEnfKZ556LQ=')\n\ntry:\n from .local_settings import *\nexcept ImportError:\n pass\n\nWEBPACK_CONFIG = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'STATS_FILE': str(Path(__file__).parent.parent / 'static' / 'dist' / 'stats.json')\n },\n 'VENDOR': {\n 'CACHE': not DEBUG,\n 'STATS_FILE': str(Path(__file__).parent.parent / 'static' / 'dist' / 'vendor-stats.json')\n }\n}\n" }, { "alpha_fraction": 0.6443662047386169, "alphanum_fraction": 0.672535240650177, "avg_line_length": 24.81818199157715, "blob_id": "c655057cda2e71d0d19fb842d2ab39d42d2a3afd", "content_id": "5204d1dbe3b935fc91955f0112dc0897637cd62d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 568, "license_type": "permissive", "max_line_length": 60, "num_lines": 22, "path": "/webpack-configs/server.js", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "/* eslint-disable */\n\nconst webpack = require('webpack');\nconst WebpackDevServer = require('webpack-dev-server');\nconst DashboardPlugin = require('webpack-dashboard/plugin');\n\nconst config = require('./development.js');\n\nconst compiler = webpack(config);\ncompiler.apply(new DashboardPlugin());\n\nnew WebpackDevServer(compiler, {\n publicPath: config.output.publicPath,\n hot: true,\n inline: true,\n historyApiFallback: 
true,\n}).listen(3000, '0.0.0.0', (err) => {\n if (err) {\n console.log(err);\n }\n console.log('Listening at 0.0.0.0:3000');\n});\n" }, { "alpha_fraction": 0.5089820623397827, "alphanum_fraction": 0.6946107745170593, "avg_line_length": 15.699999809265137, "blob_id": "2d1f958a052390c019ae71a4825d1eac6e1852d7", "content_id": "bc0f65f4c074d1abd2408c968bce22127307a341", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 167, "license_type": "permissive", "max_line_length": 22, "num_lines": 10, "path": "/requirements.txt", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "aiodns==1.1.1\naiohttp==2.0.5\naiohttp_jinja2==0.13.0\naiohttp_session==0.8.0\ncchardet==1.1.3\ncryptography==1.8.1\nmotor==1.1\npasslib==1.7.1\ntrafaret==0.9.0\nuvloop==0.8.0\n" }, { "alpha_fraction": 0.6968085169792175, "alphanum_fraction": 0.6968085169792175, "avg_line_length": 19.88888931274414, "blob_id": "f8853f9c7ac6a1a6ea6281ca58df7ed5e499f98b", "content_id": "583cfa99a50ce2269c0db93a0fe4516faea2237b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "permissive", "max_line_length": 47, "num_lines": 9, "path": "/server/accounts/auth.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "USER_ID_SESSION_KEY = 'user_id'\n\n\ndef login_user(session, user_id):\n session[USER_ID_SESSION_KEY] = str(user_id)\n\n\ndef logout_user(session):\n return session.pop(USER_ID_SESSION_KEY)\n" }, { "alpha_fraction": 0.5996275544166565, "alphanum_fraction": 0.5996275544166565, "avg_line_length": 28.83333396911621, "blob_id": "3a4952e69ec785c647456d446c97340183a9a536", "content_id": "1cfa8e307e067f0500683a95ab3c5b5ea667e8a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "permissive", "max_line_length": 62, "num_lines": 36, "path": 
"/server/core/webpack_loader/loader.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import json\n\nfrom .exceptions import WebpackException\n\n\nclass WebpackLoader:\n name = None\n config = None\n stats_data = None\n\n def __init__(self, name, config):\n self.name = name\n self.config = config\n\n def load_assets(self):\n if self.config['CACHE'] and self.stats_data:\n return self.stats_data\n with open(self.config['STATS_FILE']) as f:\n self.stats_data = json.load(f)\n return self.stats_data\n\n def _get_asset_field(self, entry_name, field):\n data = self.load_assets()\n if data['status'] != 'done':\n raise WebpackException(data['message'])\n for chunk in data['chunks'][entry_name]:\n if not chunk['name'].endswith('.map'):\n return chunk[field]\n return chunk['name']\n raise WebpackException('Unhandled error')\n\n def get_asset_name(self, entry_name):\n return self._get_asset_field(entry_name, 'name')\n\n def get_asset_public_path(self, entry_name):\n return self._get_asset_field(entry_name, 'publicPath')\n" }, { "alpha_fraction": 0.7126865386962891, "alphanum_fraction": 0.7126865386962891, "avg_line_length": 18.14285659790039, "blob_id": "69a9d7eb16f69ac38b1e43d600e7185922ef31f6", "content_id": "658fa71a1a2a3e3ce8da5df29492459cbda6eacf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "permissive", "max_line_length": 51, "num_lines": 14, "path": "/server/core/tests/test_passwords.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom .. 
import passwords\n\nparams = [\n 'Hello!',\n 'World',\n]\n\n\[email protected]('password', params)\ndef test_generated_password(password):\n hash = passwords.generate_password(password)\n assert passwords.check_password(password, hash)\n" }, { "alpha_fraction": 0.7044335007667542, "alphanum_fraction": 0.7044335007667542, "avg_line_length": 42.5, "blob_id": "d687ed967f9aa4af5abc823bfd8fc2b7eb279ea1", "content_id": "33ad902c0784fd069a7b38857d88abf7f52516cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 93, "num_lines": 14, "path": "/server/routes.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp.web import Application\n\nfrom .accounts import views as accounts_views\nfrom .core import views as core_views\n\n\ndef make_routes(app: Application, directory_root: str):\n app.router.add_route('*', '/test', accounts_views.simple_test_view, name='view_for_test')\n app.router.add_route('*', '/signup', accounts_views.signup, name='signup')\n app.router.add_route('*', '/login', accounts_views.login)\n app.router.add_route('GET', '/app', core_views.application)\n\n if app.debug: # In production mode it will be handled by web-server\n app.router.add_static('/static', directory_root)\n" }, { "alpha_fraction": 0.686956524848938, "alphanum_fraction": 0.739130437374115, "avg_line_length": 24.55555534362793, "blob_id": "3ddf749adcebe66ac7563b07c545eb6917a69866", "content_id": "79354fe3191e9c9186db7af328903a5fdb5ce02f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "permissive", "max_line_length": 53, "num_lines": 9, "path": "/server/core/passwords.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from passlib.hash import pbkdf2_sha256\n\n\ndef generate_password(password: str) -> str:\n return pbkdf2_sha256.hash(password)\n\n\ndef 
check_password(password: str, hash: str) -> bool:\n return pbkdf2_sha256.verify(password, hash)\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 22.11111068725586, "blob_id": "81dac1db900a7fcb3cefa83b876ecc2b825e7fed", "content_id": "464cbd5720c10ecd4591428ed8c6236b2a09dd26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "permissive", "max_line_length": 57, "num_lines": 9, "path": "/server/core/webpack_loader/utils.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from .loader import WebpackLoader\n\nloaders = {}\n\n\ndef get_loader(name, config) -> WebpackLoader:\n if name not in loaders:\n loaders[name] = WebpackLoader(name, config[name])\n return loaders[name]\n" }, { "alpha_fraction": 0.8068181872367859, "alphanum_fraction": 0.8068181872367859, "avg_line_length": 16.600000381469727, "blob_id": "491ed33c4994f9c29bc38ffaa0d7de6d019127f3", "content_id": "bd5a89e493c274c69fdda5765dafe3d34a50035a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "permissive", "max_line_length": 38, "num_lines": 5, "path": "/server/core/webpack_loader/exceptions.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp.web import HTTPException\n\n\nclass WebpackException(HTTPException):\n pass\n" }, { "alpha_fraction": 0.5809128880500793, "alphanum_fraction": 0.5809128880500793, "avg_line_length": 22.512195587158203, "blob_id": "e6ee264b64b5d00baa29f4f6e7f8c3c30c19708a", "content_id": "463a12d3edaf0a49eedb239f8f358ebe2d93a8d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "permissive", "max_line_length": 72, "num_lines": 41, "path": "/server/core/forms.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import 
asyncio\nfrom motor.motor_asyncio import AsyncIOMotorDatabase\n\nimport trafaret\n\n\nclass TrafaretError(Exception):\n def __init__(self, errors, *args, **kwargs):\n self.errors = errors\n\n\nclass TrafaretForm:\n fields = None\n result = None\n errors = None\n\n def __init__(self, data: dict=None, db: AsyncIOMotorDatabase=None):\n self.db = db\n self.errors = {}\n if data:\n self.data = data\n else:\n self.data = {}\n\n async def extra_validation(self):\n pass\n\n async def is_valid(self):\n try:\n self.result = self.fields.check(self.data)\n await self.extra_validation()\n return True\n except trafaret.DataError:\n self.errors = trafaret.extract_error(self.fields, self.data)\n return False\n except TrafaretError as e:\n self.errors = e.errors\n return False\n\n async def save(self):\n pass\n" }, { "alpha_fraction": 0.6217105388641357, "alphanum_fraction": 0.6381579041481018, "avg_line_length": 32.77777862548828, "blob_id": "717b68699de2549cfcf14d66abcad4d0cfe56989", "content_id": "4282f2f7e53ac03af3c3a6a97d0ab1f6bef45af3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "permissive", "max_line_length": 61, "num_lines": 27, "path": "/server/accounts/tests/test_views.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from server.tests import BaseAsyncTestCase, unittest_run_loop\n\n\nclass AccountViewsTestCase(BaseAsyncTestCase):\n @unittest_run_loop\n async def test_simple_test_view(self):\n resp = await self.client.get('/test')\n assert resp.status == 200\n assert await resp.text() == 'Test response'\n\n @unittest_run_loop\n async def test_simple_test_view_1(self):\n resp = await self.client.get('/test')\n assert resp.status == 200\n assert await resp.text() == 'Test response'\n\n @unittest_run_loop\n async def test_simple_test_view_2(self):\n resp = await self.client.get('/test')\n assert resp.status == 200\n assert await resp.text() == 'Test response'\n\n 
@unittest_run_loop\n async def test_simple_test_view_3(self):\n resp = await self.client.get('/test')\n assert resp.status == 200\n assert await resp.text() == 'Test response'\n" }, { "alpha_fraction": 0.6409448981285095, "alphanum_fraction": 0.6440944671630859, "avg_line_length": 32.421051025390625, "blob_id": "ac784316710b30097d1bf1b526866df2efe00827", "content_id": "040ba4429ffe83f8f07ea14223b3ecd2c0b24572", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "permissive", "max_line_length": 88, "num_lines": 19, "path": "/server/core/webpack_loader/contrib/jinja2ext.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "import jinja2.ext\n\nfrom ..utils import get_loader\n\n\ndef get_bundle(entry_name, bundle_name, config, load_file, static_path='/static/dist/'):\n loader = get_loader(bundle_name, config)\n if load_file:\n asset_name = loader.get_asset_name(entry_name)\n name = '{}{}'.format(static_path, asset_name)\n else:\n name = loader.get_asset_public_path(entry_name)\n return '<script src=\"{}\"></script>'.format(name)\n\n\nclass WebpackExtension(jinja2.ext.Extension):\n def __init__(self, environment):\n super().__init__(environment)\n environment.globals['get_bundle'] = lambda *a, **k: get_bundle(*a, **k)\n" }, { "alpha_fraction": 0.8196721076965332, "alphanum_fraction": 0.8196721076965332, "avg_line_length": 7.714285850524902, "blob_id": "178f28f7f401f769eff562b707aef2b5c52ec2b3", "content_id": "c5cb82f55620e8cf960aa0af203d4f45a26b0edc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 61, "license_type": "permissive", "max_line_length": 19, "num_lines": 7, "path": "/requirements-dev.txt", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "-r requirements.txt\n\ncoverage\nipython\nipdb\npytest\npytest-cov\n" }, { "alpha_fraction": 0.6378481984138489, "alphanum_fraction": 0.6426513195037842, 
"avg_line_length": 25.69230842590332, "blob_id": "cb6cb305799aca72166f2ec5b6b2f4efd7ca465d", "content_id": "ea7c9cb61a413fac2aec03ba6d19db1ce001d322", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "permissive", "max_line_length": 72, "num_lines": 39, "path": "/server/accounts/views.py", "repo_name": "vgamula/sp", "src_encoding": "UTF-8", "text": "from aiohttp import web\nfrom aiohttp_jinja2 import template\nfrom aiohttp_session import get_session\n\nfrom .forms import RegistrationForm\nfrom . import auth\n\n\n@template('accounts/signup.jinja2')\nasync def signup(request: web.Request):\n if request.method == 'POST':\n data = await request.post()\n form = RegistrationForm(data, db=request.app['db'])\n if await form.is_valid():\n user = await form.save()\n session = await get_session(request)\n auth.login_user(session, user['_id'])\n return {'form': form, 'message': 'User has been registered'}\n else:\n form = RegistrationForm()\n return {'form': form}\n\n\n@template('accounts/login.jinja2')\ndef login(request: web.Request):\n return {\n 'a': '1',\n 'b': '2'\n }\n\n\nasync def logout(request: web.Request):\n session = await get_session(request)\n auth.logout_user(session)\n return web.HTTPFound('/')\n\n\nasync def simple_test_view(request: web.Request):\n return web.Response(body='Test response')\n" } ]
24
gebi/csv-test
https://github.com/gebi/csv-test
8c6f855070ca6df25010b2f54076e55bf009d522
032a9ab8193da0d9a469773ebbff2d274bd97507
48781fc20263c1998054570bd5a17678e2936fc5
refs/heads/master
2023-02-20T08:02:18.146753
2023-02-14T10:37:34
2023-02-14T10:37:34
14,152,956
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6483705043792725, "alphanum_fraction": 0.6552315354347229, "avg_line_length": 26.761905670166016, "blob_id": "7ccf0839406be22c09392b68299df6d7f0c43393", "content_id": "bdf0d2a59b339488792151952c7af8d177f74f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 89, "num_lines": 21, "path": "/csv-test.py", "repo_name": "gebi/csv-test", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport csv\nimport sys\nimport optparse\n\nparser = optparse.OptionParser()\nparser.add_option(\"--trim_leading_space\", dest=\"trim_leading_space\", action='store_true',\n default=False, help=\"Trim leading space on begin of csv records\")\n(opts, args) = parser.parse_args()\n\nspamreader = csv.reader(sys.stdin, skipinitialspace=opts.trim_leading_space)\nlineno = 1\nfor row in spamreader:\n elemno = 0\n print lineno, \"-\",\n for elem in row:\n sys.stdout.write(\" %d:\\\"%s\\\"\" %(elemno, elem.replace(\"\\n\", \"\\\\n\")))\n elemno += 1\n print\n lineno += 1\n" }, { "alpha_fraction": 0.6113602519035339, "alphanum_fraction": 0.6173393130302429, "avg_line_length": 17.58333396911621, "blob_id": "1dcad80168d749c7f922e719bb6ad4606e61c2df", "content_id": "0c6e886736935b7c506bed9da227da8ef2676446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 669, "license_type": "no_license", "max_line_length": 96, "num_lines": 36, "path": "/csv-test.go", "repo_name": "gebi/csv-test", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc main() {\n\tp := csv.NewReader(os.Stdin)\n\tflag.BoolVar(&p.LazyQuotes, \"lazy_quotes\", false, \"Parse csv with lazy quote rules\")\n\tflag.BoolVar(&p.TrimLeadingSpace, \"trim_leading_space\", false, \"Trim leading spaces\")\n\tflag.IntVar(&p.FieldsPerRecord, \"columns\", -1, \"Set number of columns (-1 
disables all checks\")\n\tflag.Parse()\n\n\tlineno := 1\n\tfor {\n\t\trecord, err := p.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Printf(\"%d -\", lineno)\n\t\tfor num, elem := range record {\n\t\t\tfmt.Printf(\" %d:%q\", num, elem)\n\t\t}\n\t\tfmt.Println()\n\t\tlineno++\n\t}\n}\n" }, { "alpha_fraction": 0.5540616512298584, "alphanum_fraction": 0.605042040348053, "avg_line_length": 29.25423812866211, "blob_id": "b18ef4e071d78bc4ab90f58d6940dc351816842d", "content_id": "ef41c06643c0334868e4686488db14cbb85ee7b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1805, "license_type": "no_license", "max_line_length": 85, "num_lines": 59, "path": "/README.md", "repo_name": "gebi/csv-test", "src_encoding": "UTF-8", "text": "csv-test\n========\n\ncsv test programs in various languages including evil csv input\n\nCurrently implemented in Go and Pyhon.\nConclusion for the current implementations with default settings is that\nPython and Go differ in that python parses evil.csv without errors but\nintroduces a silent data corruption.\nGo produces a parse error and let the developer decide what to do with leading\nspaces in csv fields.\n\nGo\n--\n\n % go run csv-test.go <evil.csv \n 1 - 0:\"character\" 1:\"quote\"\n 2 - 0:\"Inigo\" 1:\"You killed my father\\nDarth, I am your father\"\n 3 - 0:\"Buddha\" 1:\"\"\n 4 - 0:\"Dada\" 1:\"'dodo'\"\n 5 - 0:\"Evil Guy\" 1:\"\\\";drop table\"\n line 7, column 10: bare \" in non-quoted-field\n exit status 1\n\n % go run csv-test.go -trim_leading_space -lazy_quotes <evil.csv\n 1 - 0:\"character\" 1:\"quote\"\n 2 - 0:\"Inigo\" 1:\"You killed my father\\nDarth, I am your father\"\n 3 - 0:\"Buddha\" 1:\"\"\n 4 - 0:\"Dada\" 1:\"'dodo'\"\n 5 - 0:\"Evil Guy\" 1:\"\\\";drop table\"\n 6 - 0:\"Expert\" 1:\"Trust me, I'm an expert\"\n 7 - 0:\"Balmer\" 1:\"\\\"Developers, Developers\\\"\"\n 8 - 
0:\"Yoda๏ผŒDo๏ผŒdo not๏ผŽdo not try\"\n 9 - 0:\"Me\" 1:\"๏ผ‚Do not\"\n 10 - 0:\"quote me\" 1:\"please๏ผ‚\"\n\nPython\n------\n\n % ./csv-test.py <evil.csv\n 1 - 0:\"character\" 1:\"quote\"\n 2 - 0:\"Inigo\" 1:\"You killed my father\\nDarth, I am your father\"\n 3 - 0:\"Buddha\" 1:\"\"\n 4 - 0:\"Dada\" 1:\"'dodo'\"\n 5 - 0:\"Evil Guy\" 1:\"\";drop table\"\n 6 - 0:\"Expert\" 1:\" \"Trust me\" 2:\" I'm an expert\"\"\n 7 - 0:\"Balmer\" 1:\"\"Developers, Developers\"\"\n 8 - 0:\"Yoda๏ผŒDo๏ผŒdo not๏ผŽdo not try\"\n 9 - 0:\"Me\" 1:\"๏ผ‚Do not\"\n 10 - 0:\"quote me\" 1:\" please๏ผ‚\"\n 11 -\n\n\nReferences/Thx\n--------------\n\nhttp://tools.ietf.org/html/rfc4180\n\nMatthias Wiesmann https://wiesmann.codiferes.net/wordpress/archives/19862 for his evil.csv input\n" } ]
3
olegbuevich/storm-indicator-pyqt
https://github.com/olegbuevich/storm-indicator-pyqt
0b3f4bfff14b979195c2e054650bd5e7e06cc223
4c4d9e9c9c5c0681476c23f9fef7ee420d4e7303
f8c4904b0b11736ebb4b89f334681cdfcef69cd1
refs/heads/master
2020-04-09T07:59:23.606933
2018-12-05T22:14:56
2018-12-05T22:14:56
160,178,306
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7865168452262878, "alphanum_fraction": 0.7865168452262878, "avg_line_length": 10.125, "blob_id": "96c4e2ce8afebdd22258f36acbcdae80ae3f9f3b", "content_id": "1097098ccc6c221b6803917eda4bd29e23f6488b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 89, "license_type": "permissive", "max_line_length": 30, "num_lines": 8, "path": "/requirements-dev.txt", "repo_name": "olegbuevich/storm-indicator-pyqt", "src_encoding": "UTF-8", "text": "# Runtime requirements\n--requirement requirements.txt\n\n# Linting\npylint\n\n# package\ntwine\n" }, { "alpha_fraction": 0.728314220905304, "alphanum_fraction": 0.7315875887870789, "avg_line_length": 21.629629135131836, "blob_id": "ee6fdc7076005c1c1f9a4e108f62ee9c8344374d", "content_id": "6839257bbf6c452f30ee52c1307583475739e0cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 611, "license_type": "permissive", "max_line_length": 224, "num_lines": 27, "path": "/README.md", "repo_name": "olegbuevich/storm-indicator-pyqt", "src_encoding": "UTF-8", "text": "# storm-indicator-pyqt\n\nPyQt based indicator for connecting to your SSH hosts easily.\n\n**storm-indicator-pyqt** uses [~/.ssh/config](http://linux.die.net/man/5/ssh_config) files to list SSH connections. 
If you don't use your SSH config file yet, you can optionally use [storm](http://www.github.com/emre/storm)\nto easily add your servers.\nBased on [emre/storm-indicator](https://github.com/emre/storm-indicator)\n\n## requirements\n\n* envparse\n* PyQt5\n* stormssh\n\n## installation\n\n```bash\npython setup.py install\n```\n\n## running\n\n```bash\nssh-indicator-pyqt\n# or\nSHELLEM=\"gnome-terminal --\" ssh-indicator-pyqt\n```\n" }, { "alpha_fraction": 0.608517587184906, "alphanum_fraction": 0.6134316325187683, "avg_line_length": 23.918367385864258, "blob_id": "c4769a7d6b1c5ad40c1454d999a28baca89e3d31", "content_id": "28c4b13f9e382811231dc83b2047f266d1bd19ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "permissive", "max_line_length": 86, "num_lines": 49, "path": "/setup.py", "repo_name": "olegbuevich/storm-indicator-pyqt", "src_encoding": "UTF-8", "text": "from os import path\nfrom setuptools import setup\n\n\nHERE = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(HERE, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n\nsetup(\n name='storm-indicator-pyqt',\n version='1.2.1',\n description='PyQt based indicator for connecting to your SSH connections easily.',\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n\n url='https://github.com/olegbuevich/storm-indicator',\n\n license='MIT',\n\n author='Oleg Buevich',\n author_email='[email protected]',\n\n packages=['storm_indicator_pyqt'],\n\n entry_points={\n 'console_scripts': [\n 'ssh-indicator-pyqt=storm_indicator_pyqt.__main__:main',\n ]\n },\n\n package_data={'storm_indicator_pyqt': ['icons/tray.svg']},\n\n install_requires=[\n \"stormssh\",\n \"PyQt5\",\n \"envparse\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: 
MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Topic :: System :: Systems Administration\"\n ]\n)\n" } ]
3
codeforkobe/codeforkobe.github.io
https://github.com/codeforkobe/codeforkobe.github.io
ae13671e8e0332b8946fcf66b68b86dcd3386da4
19f2a118049c1cede1551cddbe055c4f3373a6c1
1b9052a12dad5c0510d81c57a169ee6119a04c70
refs/heads/master
2021-01-18T23:12:08.949715
2020-01-16T10:56:42
2020-01-16T10:56:42
42,661,863
1
1
null
2015-09-17T14:44:17
2020-01-16T10:57:12
2020-01-16T10:57:10
HTML
[ { "alpha_fraction": 0.6940194964408875, "alphanum_fraction": 0.7318497896194458, "avg_line_length": 20.179012298583984, "blob_id": "0404f9be08ac44c3056b3808288b2a2845806102", "content_id": "9d8dd3f518f0bd1fa4d756b8b2a621cc60090afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6381, "license_type": "no_license", "max_line_length": 538, "num_lines": 162, "path": "/_posts/2017-03-16-meeting26.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš26th\r\ndate: 2017-03-16 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใƒ„ใ‚ขใƒผๅคงๅ ฑๅ‘Šไผš(ๆ˜Žไธปใ€็ง‹ๅฑฑใ€่ฅฟๆ‘ใ€ๅคงๅนณ)\r\n (2)ใƒใƒฃใƒฌใƒณใ‚ธ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ ฑๅ‘Š(้ซ˜ๆฉ‹)\r\n (3)็ฅžๆˆธๅธ‚็ตฆ้ฃŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๆœ‰ๅŠนๆดป็”จ(ๅพŒ่—ค)\r\n (4)็œŒๆ”ฟ150ๅ‘จๅนด่จ˜ๅฟตๅ…ˆ่กŒไบ‹ๆฅญ\"HYOGO150\"ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใ‚ทใƒณใƒใ‚ธใ‚ฆใƒ ๅ‘Š็Ÿฅ(ๆœจๆ‘)\r\n (5)ใใฎไป–็พๅœจ่ชฟๆ•ดไธญ๏ผ†ๅ‹Ÿ้›†ไธญ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1875944912682771/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-26th-meeting--Adrcd6lB7VgVnJUuH9rgbkh9AQ-XMRsMdAERomyBVHEa4STG)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“ใƒฌใƒใƒผใƒˆ](http://masaki-ravens.com/main/blog/everythingispractice/?p=1340)\r\n\r\nๅ ดๆ‰€:[ใ‚นใƒšใƒผใ‚นใ‚ขใƒซใƒ•ใ‚กไธ‰ๅฎฎ](http://www.spacealpha.jp/sannomiya/access.html)\r\n\r\nใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n+ ใ€œไนพๆฏใ€œ\r\n+ (1)ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใƒ„ใ‚ขใƒผๅคงๅ ฑๅ‘Šไผš(ๆ˜Žไธปใ€็ง‹ๅฑฑใ€่ฅฟๆ‘ใ€ๅคงๅนณ)\r\n+ (2)ใƒใƒฃใƒฌใƒณใ‚ธ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ ฑๅ‘Š(้ซ˜ๆฉ‹)\r\n+ (3)็ฅžๆˆธๅธ‚็ตฆ้ฃŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๆœ‰ๅŠนๆดป็”จ(ๅพŒ่—ค)\r\n+ (4)็œŒๆ”ฟ150ๅ‘จๅนด่จ˜ๅฟตๅ…ˆ่กŒไบ‹ๆฅญ\"HYOGO150\"ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใ‚ทใƒณใƒใ‚ธใ‚ฆใƒ ๅ‘Š็Ÿฅ(ๆœจๆ‘)\r\n+ (5)ใใฎไป–็พๅœจ่ชฟๆ•ดไธญ๏ผ†ๅ‹Ÿ้›†ไธญ\r\n+ ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n+ 
้ฃ›ใณๅ…ฅใ‚Šๆญ“่ฟŽ๏ผ(^^)\r\n\r\n# ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใƒ„ใ‚ขใƒผๅคงๅ ฑๅ‘Šไผš\r\n(ๆ˜Žไธปใ€็ง‹ๅฑฑใ€่ฅฟๆ‘ใ€ๅคงๅนณ)\r\n\r\n## ๆฆ‚่ฆ\r\nๆ˜Žไธปใ•ใ‚“๏ผ ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒชใƒณใ‚ฏ\r\n\r\n็ฅžๆˆธๅธ‚ใฎใƒ—ใƒญใ‚ฐใƒฉใƒ ใงใ€ๅญฆ็”Ÿใ‚’ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใซ้€ฃใ‚Œใฆใ„ใใƒ—ใƒญใ‚ฐใƒฉใƒ ใฎ้‹ๅ–ถใ‚’ๅฎŸๆ–ฝใ€‚\r\n\r\n็ฅžๆˆธๅธ‚ใŒๅ–ใ‚Š็ต„ใ‚€้ …็›ฎใจใ—ใฆๆฌกใฎใ‚‚ใฎใŒใ‚ใ‚‹\r\n\r\n- ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ๆ”ฏๆด\r\n- ใ‚จใ‚ณใ‚ทใ‚นใƒ†ใƒ ๆง‹็ฏ‰\r\n\r\nใƒ—ใƒญใ‚ฐใƒฉใƒ ใจใ—ใฆใฏๆฌกใฎใ‚‚ใฎใŒใ‚ใ‚‹\r\n\r\n- Kobe global startup gatway\r\n- 500kobe pre-acceleration\r\n- ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผไบคๆต่‚ฒๆˆใƒ—ใƒญใ‚ฐใƒฉใƒ  โ†ใ‚ณใƒฌ http://kobe-siliconvalley.com/\r\n\r\n็ด„ 20 ๅใฎๅญฆ็”Ÿใ‚’ๅผ•็އใ—ใŸใ€‚ๅ†…ๅฎนใจใ—ใฆใฏๆฌกใฎใ‚ˆใ†ใชใ‚‚ใฎ\r\n\r\n- ่ตทๆฅญๅฎถใƒžใ‚คใƒณใƒ‰ใฎ้†ธๆˆ\r\n- ใƒ“ใ‚ธใƒใ‚นใƒ—ใƒฉใƒณใฎใƒ–ใƒฉใƒƒใ‚ทใƒฅใ‚ขใƒƒใƒ—\r\n- ใƒ”ใƒƒใƒใ‚นใ‚ญใƒซใ‚’่บซใซ็€ใ‘ใ‚‹\r\n\r\n่จชๅ•ๅ…ˆใฏๆฌก๏ผšใชใ‚‹ในใๆž ใซใจใ‚‰ใ‚ใ‚Œใชใ„่จชๅ•ๅ…ˆใ‚’้ธใ‚“ใงไบคๆตใ—ใฆใใŸใ€‚\r\n- [Computer History museum](http://www.computerhistory.org/)\r\n- [Runway](http://www.runway.is/), Code for America, Pinterest\r\n- Google, Stanfordๅคงๅญฆ\r\n- Yahoo, Runway\r\n- 500 startups\r\n\r\nCode for America ใฎใƒ—ใƒญใƒ€ใ‚ฏใƒˆ๏ผšhttps://www.codeforamerica.org/resources/product-and-how-to-library ใซใคใ„ใฆๆ•™ใˆใฆใ‚‚ใ‚‰ใฃใŸใ‚Š\r\nๆณ•ๅˆถๅบฆใ‚’ใ†ใพใๆดป็”จใงใใฆใ„ใชใ„ใ‹ใŸใซใ‚นใƒžใƒผใƒˆใƒ•ใ‚ฉใƒณใงใฎใ‚ขใ‚ฏใ‚ปใ‚นๆ‰‹ๆฎตใ‚’ๆไพ›ใ™ใ‚‹ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใ‚‚\r\nRocketSpace ใซๅ…ฅใฃใฆใ„ใ‚‹ๆ–นใซใฏใ„ใฃใŸใ‚Š\r\nTwitter ใฎไบบใซไผšใฃใฆใฟใŸใ‚Š\r\n\r\n## ไฝ“้จ“ใซใคใ„ใฆ\r\n\r\n็ง‹ๅฑฑใ•ใ‚“๏ผ ไบฌๅคง ็”Ÿๅ‘ฝ็ง‘ๅญฆ็ ”็ฉถ็ง‘\r\n\r\nใ€ŒDeep learning ใงๆ‰‹่ฉฑใฎ็ฟป่จณใ€\r\n\r\nๅ„ฒใ‹ใ‚‹ใ‹ใฉใ†ใ‹ใ‚ˆใ‚Šใ‚‚ใ€ไป–็คพใซ็งปใฃใฆใ‚‚ใ‚„ใฃใฆใ‚‹ไป•ไบ‹ใฎไพกๅ€คใŒใใกใ‚“ใจ็™บๆฎใ•ใ‚Œใ‚‹ใ€ใจใ„ใ†่ฆ–็‚นใงไป•ไบ‹ใ‚’ใ—ใฆใ„ใ‚‹ใ“ใจใŒๅฐ่ฑก็š„ใ 
ใฃใŸใ€‚\r\nใ‚‚ใจใ‚‚ใจไบฌๅคงใฎ[GTEP](https://www.gsm.kyoto-u.ac.jp/gtep/)ใงๅ‡บใ—ใฆใ„ใŸใ‚‚ใฎใ‚’ใ‚‚ใฃใฆใ„ใฃใŸใ€‚\r\nๅฐ‚้–€ๆ€งใฎ็ตŒ้จ“ๅ€คใ‚’้‡่ฆ่ฆ–ใ•ใ‚ŒใฆใŸใ€‚\r\n\r\nใ‚ญใƒผใƒฏใƒผใƒ‰ใ€Œไบบใฎ็ธใ€\r\n\r\nใƒ”ใƒƒใƒใง้‡่ฆ่ฆ–ใ•ใ‚ŒใŸใ‚‚ใฎใ€Œใƒใƒผใƒ ใ€๏ผˆไพ‹๏ผšใƒ“ใƒƒใ‚ฏใƒ‡ใƒผใ‚ฟๆŒใฃใฆใ‚‹ใฎ๏ผŸ๏ผ‰\r\n\r\nใ€Œ่‹ฑ่ชžใฎๅฃใ€\r\n\r\n## ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผๆˆๆžœๅ ฑๅ‘Š\r\n\r\n่ฅฟๆ‘ใ•ใ‚“๏ผ ้˜ชๅคง ใ‚ทใ‚นใƒ†ใƒ ็ง‘ๅญฆ็ง‘\r\n\r\nใƒญใƒœใƒƒใƒˆใƒ™ใƒณใƒใƒฃใƒผใงใ‚ขใƒซใƒใ‚คใƒˆไธญ\r\n\r\n- ใใฃใ‹ใ‘ใฏใƒใƒƒใ‚ซใ‚ฝใƒณ๏ผˆ็ฅžๆˆธๅธ‚ใฎไบบใซๅฃฐใ‹ใ‘ใ•ใ‚ŒใŸ๏ผ‰\r\n- ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใ‚’่ฆ‹ใฆใฟใŸใ‹ใฃใŸ\r\n- ใชใœใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผใŒใ™ใ”ใ„ใฎใ‹ใ‚’็Ÿฅใ‚ŠใŸใ‹ใฃใŸ\r\n\r\n1้€ฑ้–“ๅปถ้•ทใ—ใฆๆปžๅœจใ—ใฆใ€ใ‚คใƒ™ใƒณใƒˆใซๅ‚ๅŠ \r\n\r\n- ใ€ŒBots and Beerใ€ https://svrobo.org/bots-beer/\r\n- ใ€ŒJapanese pitch nightใ€http://nedo-tcp.jp/event/pitch/20170216.html\r\n\r\nๆ„Ÿใ˜ใŸใ“ใจ\r\n\r\n- ใ‚ฟใ‚คใƒ ใ‚นใ‚ฑใƒผใƒซใฎ็Ÿญใ•\r\n- ๆŒ‘ๆˆฆใจๅคฑๆ•—\r\n- ไบบๅŒๅฃซใฎใคใชใŒใ‚Š\r\n\r\n## Code for Kobe \r\n\r\nๅคงๅนณใ•ใ‚“๏ผ ้˜ชๅคง ๅŸบ็คŽๅทฅๅญฆ้ƒจ\r\n\r\n็Ÿฅ่ƒฝใ‚ทใ‚นใƒ†ใƒ ๅญฆใ‚ณใƒผใ‚น\r\n\r\nใ‚ขใƒกใƒชใ‚ซใ‚’้€šใ—ใฆๆ—ฅๆœฌใ‚’็Ÿฅใ‚ŒใŸใ€‚\r\n\r\n- ๆ—ฅๆœฌไบบใฎๅ•้กŒๆ„่ญ˜๏ผšใ€Œไฝ•ใŒๅ•้กŒใชใฎใ‹ใ€ใจ่จ€ใ‚ใ‚ŒใŸใจใใซใ€ๆญฃใ—ใ„ใƒใ‚คใƒณใƒˆใ‚’่ฆ‹ๅ‡บใ—ใซใใ„\r\n- ใ€Œไบบ้–“ใ‚‰ใ—ใ„ใ‚ทใ‚นใƒ†ใƒ ใ€ใ‚’ๆฑ‚ใ‚ใ‚‹ใ‚ˆใ‚Šใ‚‚ใ€ไพ‹ใˆใฐ google ใงใ‚‚ใ€Œๅฝนใซ็ซ‹ใคใ‹ใฉใ†ใ‹ใ€ใŒ้‡่ฆ–ใ•ใ‚Œใฆใ„ใŸ\r\n- ๆ™‚้–“ใฏ่ฒด้‡\r\n- ใ€Œ็งใŸใกใฏไธ–็•Œใ‚’ๅค‰ใˆใ‚‰ใ‚Œใ‚‹ใ€ใจใ„ใ†ๅฎŸๆ„Ÿใ‚’ๆŒใฃใฆใ„ใ‚‹ใ‹\r\n\r\n# ใƒใƒฃใƒฌใƒณใ‚ธ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ ฑๅ‘Š\r\n(้ซ˜ๆฉ‹)\r\n\r\n[ใƒใ‚นใ‚ฟใƒผใ‚ปใƒƒใ‚ทใƒงใƒณ](http://park.itc.u-tokyo.ac.jp/padit/cog2016/area/kinki.html#sanda-shi)ใงใƒ—ใƒฌใ‚ผใƒณใƒ†ใƒผใ‚ทใƒงใƒณใ—ใฆใใพใ—ใŸ\r\n\r\n\r\n็™บ่กจใฎๅฝขๆ…‹ใฏๆฌกใฎใ‚ตใ‚คใ‚ฏใƒซใ‚’็นฐใ‚Š่ฟ”ใ™ๅฝขใ ใฃใŸ\r\n\r\n- ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ข 7 ๅˆ†\r\n- ่‡ชๆฒปไฝ“่ชฌๆ˜Ž 3 ๅˆ†\r\n- ่ณช็–‘ๅฟœ็ญ” 10 ๅˆ†\r\n\r\nๆ„Ÿๆƒณ\r\n\r\n- 
็™บ่กจใ—ใŸ็ต„ใฏๅฎŸ้š›ใซๆ‰‹ใ‚’ๅ‹•ใ‹ใ—ใฆใ„ใ‚‹ไบบใŸใก\r\n- ใพใ ใพใ ้ŽๆธกๆœŸใƒปๆ‰‹ๆŽขใ‚Š\r\n- ่ชฐใŒใ‚„ใ‚‹ใฎ๏ผŸใฟใŸใ„ใชใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใ‚‚\r\n\r\n# ็ฅžๆˆธๅธ‚็ตฆ้ฃŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๆœ‰ๅŠนๆดป็”จ\r\n\r\n(ๅพŒ่—ค)\r\n\r\n<iframe src=\"//www.slideshare.net/slideshow/embed_code/key/1mlnf0iaCvpSM8\" width=\"595\" height=\"485\" frameborder=\"0\" marginwidth=\"0\" marginheight=\"0\" scrolling=\"no\" style=\"border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;\" allowfullscreen> </iframe> <div style=\"margin-bottom:5px\"> <strong> <a href=\"//www.slideshare.net/masaki52/ss-73212589\" title=\"็ฅžๆˆธๅธ‚็ตฆ้ฃŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๆœ‰ๅŠนๆดป็”จ\" target=\"_blank\">็ฅžๆˆธๅธ‚็ตฆ้ฃŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๆœ‰ๅŠนๆดป็”จ</a> </strong> from <strong><a target=\"_blank\" href=\"//www.slideshare.net/masaki52\">Masaki Goto</a></strong> </div>\r\n\r\nใ€ŒLINE ใง่žใใจใ€ไปŠๆ—ฅใฎ็Œฎ็ซ‹ใ‚’ๆ•™ใˆใฆใใ‚Œใ‚‹ใ‚ขใƒ—ใƒชใ€ใจใ„ใ†ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ข\r\n\r\nAmazon backend ใง LINE bot ใ‚’ๅฎŸ่ฃ…ใ—ใŸใ€‚\r\n\r\nไฝฟใ„ๅฟƒๅœฐใ‚’่ชฟๆŸปใ—ใฆใฟใŸใ„ใ€‚\r\n\r\nใงใใชใ„ best ใ‚ˆใ‚Šใ‚‚ใงใใ‚‹ better ใ‚’\r\n\r\n# ็œŒๆ”ฟ150ๅ‘จๅนด่จ˜ๅฟตๅ…ˆ่กŒไบ‹ๆฅญ\"HYOGO150\"ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใ‚ทใƒณใƒใ‚ธใ‚ฆใƒ ๅ‘Š็Ÿฅ\r\n\r\n(ๆœจๆ‘)\r\n\r\nVR ใฎๅฑ•็คบใ‚„ใฃใฆใ‚‹ใฎใง่ฆ‹ใฆใใ ใ•ใ„๏ผ\r\n\r\n[ๅ‘Š็Ÿฅใƒšใƒผใ‚ธ](https://web.pref.hyogo.lg.jp/kk08/event/hyogo150.html)\r\n\r\n@ๅ…ตๅบซ็œŒๅ…ฌ้คจ๏ผˆ็œŒๅบๅ‰๏ผ‰ไบ‹ๅ‰็”ณใ—่พผใฟใŒๅฟ…่ฆใ€‚\r\n\r\n" }, { "alpha_fraction": 0.6942038536071777, "alphanum_fraction": 0.7459471225738525, "avg_line_length": 16.676347732543945, "blob_id": "5f5f67dcaa116bcfd0783300e14c860607eba0d4", "content_id": "bc3ae445dac9137ee7b279f82c3f6396d1c572df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9034, "license_type": "no_license", "max_line_length": 111, "num_lines": 241, "path": "/_posts/2017-10-19-meeting32.md", "repo_name": "codeforkobe/codeforkobe.github.io", 
"src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš32nd\r\ndate: 2017-10-19 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)่Šฆๅฑ‹ๅธ‚:3Dwork็ ”(็ญ’ไบ•)10ๅˆ†\r\n (2)ใ‚ฐใƒฉใƒฌใ‚ณ้šŠๆŒฏใ‚Š่ฟ”ใ‚Š(ใ‚ขใ‚ชใƒŠใƒŸ)10ๅˆ†\r\n (3)Urban Innovation Kobe(ๆพๆ‘)10ๅˆ†\r\n (4)QQ็ฅžๆˆธ(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)10ๅˆ†\r\n (5)็ฅžๆˆธๅธ‚่ทๅ“กๆœ‰ๅฟ—ๅญฆ็ฟ’ๆ”ฏๆดไบ‹ๆฅญ(ๅฐๆž—)10ๅˆ†\r\n (6)ๅœฐ็†ๆƒ…ๅ ฑใ‚’ไฝฟใฃใŸ้›ชๅฎณ่ปฝๆธ›(ๆพค็”ฐ)10ๅˆ†\r\n (7)Code for ้ธๆŒ™(ๅทไบ•)10ๅˆ†\r\n (8)ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ๅ‚ๅŠ ่€…ๅ‹Ÿ้›†(่ฅฟ่ฐท)10ๅˆ†\r\n (9)Fw:ๆฑๅŒ—็ฌฌ6ๅ›žๅ…ฑๅ‰ตใ‚คใƒ™ใƒณใƒˆ ใ€Žใ‚นใƒใƒผใƒ„ใ‚’้€šใ˜ใฆใ€ใƒ€ใ‚คใƒใƒผใ‚ทใƒ†ใ‚ฃใ‚’ๅฎŸ็พใ™ใ‚‹ใพใกใฅใใ‚Šใ€(10ๆœˆ28-29ๆ—ฅin็ฅžๆˆธ)ๅ‘Š็Ÿฅ(็‰ง)10ๅˆ†\r\n (10)ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใฎๆบๆตใ‚’ๆŽขใ‚‹ใƒผๆ—ฅๆœฌใฎๅœฐๅŸŸๆƒ…ๅ ฑๅŒ–ใฎ็ณป่ญœ(ๅ’ŒๅดŽ)30ๅˆ†\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/366274663805469/)\r\n/ [PaperๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-32nd-meeting-d0z41i60fpM0k0L2I6Bq7)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=3305)]\r\n\r\n\r\n็ฌฌ32ๅ›žCode for Kobeๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผไผšๅ ดใฏๅพฉๆดปใ—ใŸใ‚ตใƒณใƒ‘ใƒซใซๆ–ฐใŸใซใ‚ชใƒผใƒ—ใƒณใ—ใŸใ€Œ่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”ใ€ใงใ™๏ผ๏ผ\r\n\r\nๅคง็››ๆณใฎๅ†…ใซ็ต‚ไบ†ใ—ใŸCode for Japan Summit 2017 \"Borderless\"ใ‚’ๅ—ใ‘ใ€ใ€Œใ‚ตใƒŸใƒƒใƒˆใฎๅ‘ใ“ใ†ๅดใ€ใจ้กŒใ—ใฆใ€ใ“ใ‚Œใ‹ใ‚‰ๆˆ–ใ„ใฏๆ—ขใซๅ‹•ใๅง‹ใ‚ใฆใ„ใ‚‹ใใ‚Œใžใ‚Œใฎใ‚ขใ‚ฏใ‚ทใƒงใƒณใ‚’ๅ‡บใ—ๅˆใฃใฆใ„ใใŸใ„ใจๆ€ใ„ใพใ™ใ€‚\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n([http://www.kigyoplaza-hyogo.jp/](http://www.kigyoplaza-hyogo.jp/))\r\n\r\n2.ใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (1)่Šฆๅฑ‹ๅธ‚:3Dwork็ ”(็ญ’ไบ•)10ๅˆ†\r\n- (2)ใ‚ฐใƒฉใƒฌใ‚ณ้šŠๆŒฏใ‚Š่ฟ”ใ‚Š(ใ‚ขใ‚ชใƒŠใƒŸ)10ๅˆ†\r\n- (3)Urban Innovation Kobe(ๆพๆ‘)10ๅˆ†\r\n- (4)QQ็ฅžๆˆธ(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)10ๅˆ†\r\n- 
(5)็ฅžๆˆธๅธ‚่ทๅ“กๆœ‰ๅฟ—ๅญฆ็ฟ’ๆ”ฏๆดไบ‹ๆฅญ(ๅฐๆž—)10ๅˆ†\r\n- (6)ๅœฐ็†ๆƒ…ๅ ฑใ‚’ไฝฟใฃใŸ้›ชๅฎณ่ปฝๆธ›(ๆพค็”ฐ)10ๅˆ†\r\n- (7)Code for ้ธๆŒ™(ๅทไบ•)10ๅˆ†\r\n- (8)ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ๅ‚ๅŠ ่€…ๅ‹Ÿ้›†(่ฅฟ่ฐท)10ๅˆ†\r\n- (9)Fw:ๆฑๅŒ—็ฌฌ6ๅ›žๅ…ฑๅ‰ตใ‚คใƒ™ใƒณใƒˆ ใ€Žใ‚นใƒใƒผใƒ„ใ‚’้€šใ˜ใฆใ€ใƒ€ใ‚คใƒใƒผใ‚ทใƒ†ใ‚ฃใ‚’ๅฎŸ็พใ™ใ‚‹ใพใกใฅใใ‚Šใ€(10ๆœˆ28-29ๆ—ฅin็ฅžๆˆธ)ๅ‘Š็Ÿฅ(็‰ง)10ๅˆ†\r\n- (10)ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใฎๆบๆตใ‚’ๆŽขใ‚‹ใƒผๆ—ฅๆœฌใฎๅœฐๅŸŸๆƒ…ๅ ฑๅŒ–ใฎ็ณป่ญœ(ๅ’ŒๅดŽ)30ๅˆ†\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n\r\n\r\n# ่Šฆๅฑ‹ๅธ‚:3Dwork็ ”\r\n\r\n็ญ’ไบ•ใ•ใ‚“\r\n\r\nไธ€ไบบๅฝ“ใŸใ‚ŠใฎๅŽๅ…ฅใฏ็ขบใ‹ใซ้ซ˜ใ„ใ‘ใ‚Œใฉใ‚‚ใ€็พๅœจใงใฏ็‰นใซ็ชๅ‡บใ—ใŸใ‚‚ใฎใงใฏ็„กใใชใฃใฆใ„ใฆใ„ใ‚‹ใ€‚\r\n้œ‡็ฝๆ™‚ใฎๅ€Ÿ้‡‘ใŒใพใ ใพใ ๆฎ‹ใฃใฆใ„ใ‚‹\r\n\r\n\r\nๅธ‚ๅฝนๆ‰€ๅ†…ใฎใ€Œ่‡ชไธป็ ”็ฉถใ‚ฐใƒซใƒผใƒ—ๆดปๅ‹•ๅŠฉๆˆใ€ๅˆถๅบฆใ‚’ๅˆฉ็”จใ—ใฆใ€Œ:3Dwork็ ”ใ€\r\nใ€Œใƒ‡ใƒผใ‚ฟๆดป็”จใ€ใƒ‡ใ‚ธใ‚ฟใƒซๆŠ€่ก“ใ€ใƒ‡ใ‚ถใ‚คใƒณๆŒ‡ๅ‘ใ€ใ‚’ๅ–ใฃใฆ 3D ใจใ„ใ†ใ“ใจใซใชใฃใฆใ„ใ‚‹ใ€‚\r\nใ€Œใƒฏใ‚ฏใƒฏใ‚ฏ้‡่ฆ–ใ€ใฎๆดปๅ‹•\r\n\r\n็›ฎ็š„\r\n\r\n- ่ทๅ ดๅ†…ใงใฎใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใฅใใ‚Š\r\n- ไบบใจๆƒ…ๅ ฑใฎใ‚คใƒณใƒ—ใƒƒใƒˆ\r\n- ๅ„่‡ชใฎใ‚ขใ‚ฏใ‚ทใƒงใƒณใซ็น‹ใŒใ‚‹ใจใ„ใ„ใช\r\n\r\nCfJใ‚ตใƒŸใƒƒใƒˆๅ…ฑๆœ‰ไผšใ€‚ใ‚ฐใƒฉใƒฌใ‚ณใฎ็ดนไป‹ใ€‚\r\nๆฃฎ็”บใงไฝœใ‚‰ใ‚ŒใŸใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟ้ธๆŒ™ใƒใ‚นใ‚ฟใƒผใฎๆดป็”จ๏ผˆHPใฎใ‚ญใƒฃใƒƒใƒ๏ผ‰\r\n\r\nใ‚ฒใ‚นใƒˆๅ‹Ÿ้›†ใ—ใฆใ„ใพใ™๏ผ\r\n\r\nใ€Œๅนณๆˆ 30, 31 ๅนดๅบฆ็ซถไบ‰ๅ…ฅๆœญ็ญ‰ใซๅ‚ๅŠ ใ™ใ‚‹ใŸใ‚ใฎ็”ณ่ซ‹ใซใคใ„ใฆใ€\r\nhttp://www.city.ashiya.lg.jp/keiyaku/touroku_30_31.html\r\nใฏ็™ป้Œฒใ—ใฆใใ ใ•ใ„๏ผ\r\n\r\n\r\n# ใ‚ฐใƒฉใƒฌใ‚ณ้šŠๆŒฏใ‚Š่ฟ”ใ‚Š\r\n\r\nใ‚ขใ‚ชใƒŠใƒŸ\r\n\r\nใƒ“ใ‚ธใƒฅใ‚ขใƒซใƒ•ใ‚กใ‚ทใƒชใƒ†ใƒผใ‚ฟใƒผ๏ผˆๅ‰ฏๆฅญ๏ผ‰\r\nไธ‰ใƒŽๅฎฎใง็คพๅ†…ใ‚ทใ‚นใƒ†ใƒ ใ‚ตใƒใƒผใƒˆใจใ‹ใƒฆใƒผใ‚ถใƒผใฎ็›ธ่ซ‡ใซไน—ใ‚‹ใจใ‹๏ผˆๆœฌๆฅญ๏ผ‰\r\nใƒ•ใ‚กใ‚ทใƒชใƒ†ใƒผใ‚ทใƒงใƒณๆ–‡ๅ…ทๆกˆๅ†…ใ€€http://facigraworks.hatenablog.com/\r\n\r\n\r\nใ‚ฐใƒฉใƒ•ใ‚ฃใƒƒใ‚ฏใƒฌใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’ CfJ SUMMIT 2017 
ใงใ‚„ใ‚Šใพใ—ใŸใ€‚\r\n\r\nใ‚ฐใƒฉใƒ•ใ‚ฃใƒƒใ‚ฏใƒฌใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใจใฏ๏ผš\r\n\r\n- ไผš่ญฐใฎๅ†…ๅฎนใ‚’่žใๅ–ใฃใฆใ€ๅฏ่ฆ–ๅŒ–ใ™ใ‚‹\r\n- ใƒ•ใ‚กใ‚ทใƒชใƒ†ใƒผใ‚ทใƒงใƒณใ‚ฐใƒฉใƒ•ใ‚ฃใƒƒใ‚ฏใ‚‚ใ“ใฎใ‚ซใƒ†ใ‚ดใƒชใซๅ…ฅใ‚‹\r\n- CfJ Summit ใฎ่ญฐไบ‹้Œฒใƒšใƒผใ‚ธใซ่ฒผใฃใฆใ‚ใ‚‹ใฎใงใ”่ฆงใใ ใ•ใ„\r\n - ๆจก้€ ็ด™65ๆžš\r\n\r\nใ‚ฐใƒฉใƒฌใ‚ณ้šŠใฃใฆใฉใ‚“ใชไบบใŸใก๏ผŸ\r\n\r\n- ็คพไผšไบบใจๅญฆ็”ŸๅŠใ€…ใใ‚‰ใ„ใ€‚\r\n\r\nใ‚ˆใใงใใŸใจใ“ใ‚\r\n\r\n- ๅญฆ็”Ÿ๏ผ†็คพไผšไบบใฎๆททๆˆใƒใƒผใƒ \r\n - ๅนด้ฝขใซใจใ‚‰ใ‚ใ‚ŒใšๅŠฉใ‘ๅˆใˆใ‚‹ใƒใƒผใƒ \r\n - ๅˆ‡็ฃ‹็ข็ฃจใฎๅ ดใซใชใฃใฆใ„ใŸ\r\n- ่‡จๆฉŸๅฟœๅค‰ใชๅ‹•ใๆ–น\r\n - 1ๆ—ฅ็›ฎ็ต‚ไบ†ๅพŒ 2 ๆ—ฅ็›ฎใซๅ‘ใ‘ใฆใ‚ปใƒƒใ‚ทใƒงใƒณใฎ็ต„ใฟ็›ดใ—ใ‚’ใงใใŸ\r\n- ่‹ฑ่ชžใฎใ‚ปใƒƒใ‚ทใƒงใƒณใ‚’ใƒฌใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใงใใŸใ“ใจ\r\n - ๅŒๆ™‚้€š่จณใคใใ‚ปใƒƒใ‚ทใƒงใƒณ\r\n - ๅฎŒๅ…จ่‹ฑ่ชžใฎใ‚ปใƒƒใ‚ทใƒงใƒณ\r\n\r\nใ€Œ้–ข่ฅฟใงใƒ•ใ‚กใ‚ทใƒชใƒ†ใƒผใ‚ทใƒงใƒณใชๅฏ่ฆ–ๅŒ–ใ‚’ๆทฑใ‚ใ‚‹ไผšใ€facebook group\r\nhttps://www.facebook.com/kfkfkai/\r\n\r\n\r\n# Urban Innovation Kobe\r\n\r\nๆพๆ‘\r\n\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\nhttps://web.pref.hyogo.lg.jp/sr10/kigyoplaza.htmlใ€€(ๅ…ตๅบซ็œŒH๏ผฐ)\r\nhttp://www.kigyoplaza-hyogo.jp/ใ€€(ๅ…ฌๅผใ‚ตใ‚คใƒˆ)\r\nๅ…ตๅบซ็œŒใจ๏ผˆๅ…ฌ่ฒก๏ผ‰ใฒใ‚‡ใ†ใ”็”ฃๆฅญๆดปๆ€งๅŒ–ใ‚ปใƒณใ‚ฟใƒผใ‹ใ‚‰NPOๆณ•ไบบใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒชใƒณใ‚ฏใŒๅง”่จ—ใ‚’ๅ—ใ‘ใฆ้‹ๅ–ถใ—ใฆใ„ใพใ™๏ผ\r\nใ‚ณใƒฏใƒผใ‚ญใƒณใ‚ฐใŒ5000ๅ††ใชใฎใซใ€ๅ€‹ไบบใงไฝฟใˆใ‚‹ใƒฏใƒผใ‚ญใƒณใ‚ฐใƒ‡ใ‚นใ‚ฏใŒๆœˆ้ก7500ๅ††๏ผ๏ผ\r\nใœใฒใŠ่ถŠใ—ใใ ใ•ใ„ใ€‚่ˆˆๅ‘ณใฎใ‚ใ‚‹ๆ–นใฏใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒชใƒณใ‚ฏใฎใƒกใƒณใƒใƒผใพใงใ€‚\r\n\r\nใ‚ตใƒณใƒ•ใƒฉใƒณใ‚ทใ‚นใ‚ณโ€œStartup in Residenceโ€ ใฎ็ฅžๆˆธ็‰ˆ\r\nใ€Œ่ซ‹่ฒ ๅฅ‘็ด„ใ€ใงใฏใชใใ€Œๅ”ๅƒใ€\r\n\r\nใ€Œๆœฌๅฝ“ใซๅฟ…่ฆใชๆ•‘ๆ€ฅ่ฆ่ซ‹ใซๅฟœใˆใ‚‰ใ‚Œใ‚‹ไป•็ต„ใฟใฅใใ‚Šใ€\r\nใ€Œๆ„่ญ˜ใ›ใšใซๅฅๅบท่กŒๅ‹•ใŒ็ฟ’ๆ…ฃๅŒ–ใ•ใ‚Œใ‚‹ไป•็ต„ใฟใฅใใ‚Šใ€\r\n\r\nๅธ‚่ทๅ“กใฎใ‹ใŸใŒ่ชฒ้กŒใ‚’ใƒ—ใƒฌใ‚ผใƒณใ™ใ‚‹\r\n\r\n10/23 
ใพใงๅ‹Ÿ้›†ใ—ใฆใ„ใ‚‹ใฎใงใ€ๅฟœๅ‹ŸใŠๅพ…ใกใ—ใฆใŠใ‚Šใพใ™๏ผ\r\nhttp://www.city.kobe.lg.jp/information/press/2017/09/20170906041901.html\r\n\r\n\r\n# QQ็ฅžๆˆธ\r\n\r\n่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”\r\n\r\nโ†‘ใฎ โ€œUrban Innovation Kobe\" ใซใ‚จใƒณใƒˆใƒชใƒผใ—ใŸ่ฉฑ\r\nๆ•‘ๆ€ฅใซใฏใ€็พ็€ใฎๆ™‚้–“ใŒ้‡่ฆ๏ผใ—ใ‹ใ—H๏ผ—ๅนด๏ผš6ๅˆ†โ‡จH๏ผ’๏ผ—ๅนด๏ผš๏ผ˜ๅˆ†ใจๅปถใณๆฐ—ๅ‘ณ\r\nAED 3 ๅˆ†ไปฅๅ†…ใซๆŽขใ—ๅ‡บใ™\r\n\r\nๆ•‘ๆ€ฅๆฌ้€ใฎ่ฆ่ซ‹ใŒใƒ‘ใƒณใ‚ฏใ—ใฆใ„ใฆ้šฃใฎใ‚จใƒชใ‚ขใ‹ใ‚‰ๅ›žใ—ใŸใ‚Šใ—ใฆใ„ใ‚‹ใ‚‰ใ—ใ„ใ€‚ใใ‚Œใงไผธใณใฆใ„ใ‚‹ใ€‚\r\n\r\nใ€Œ#7119ใ€ใฎใ‚ตใƒผใƒ“ใ‚นใƒ‡ใ‚ถใ‚คใƒณใŒใใ‚‚ใใ‚‚้–“้•ใฃใฆใ„ใ‚‹\r\n\r\nUber็š„ใ‚ตใƒผใƒ“ใ‚นใƒขใƒ‡ใƒซโ‡จใ“ใงใงใ‚‚่งฃๆฑบใ—ใชใ„ใ€‚\r\nๅฟ…่ฆใชใฎใฏโ€ฆ\r\nใƒปๅœฐๅŸŸไบ’ๅŠฉ\r\nใƒปๅƒใๆ–นๆ”น้ฉ\r\nใƒปใƒ˜ใƒซใ‚นใƒ‡ใƒผใ‚ฟใ‚’ๅ–ใ‚Šๆˆปใ™\r\n\r\n# ็ฅžๆˆธๅธ‚่ทๅ“กๆœ‰ๅฟ—ๅญฆ็ฟ’ๆ”ฏๆดไบ‹ๆฅญ\r\n\r\nๅฐๆž—\r\n\r\nๅกพใซ่กŒใ‘ใชใ„ไธญ๏ผ“ใธใฎๆ•™่‚ฒ\r\n\r\nใ‚ฏใƒฉใ‚ฆใƒ‰ใƒ•ใ‚กใƒณใƒ‡ใ‚ฃใƒณใ‚ฐ๏ผˆReady for๏ผ‰ใ‚’ๅฎŸๆ–ฝไธญ\r\nhttps://readyfor.jp/projects/hiro4317\r\n\r\nใ“ใ‚“ใชๅญไพ›ใŒๅ—่ฌ›ใ—ใฆใ„ใพใ™\r\n\r\n1. ๆฏๅญๅฎถๅบญ\r\n2. ๅญฆๆ กใซใคใ„ใฆใ„ใ‘ใชใ„\r\n3. 
ไธ€ไบบใงๅ‹‰ๅผทใงใใชใ„ใ€ๆˆ็ธพใŒไผธใณใš่ฝใก่พผใ‚“ใงใ„ใ‚‹\r\n\r\n\r\n\r\n\r\n# ๅœฐ็†ๆƒ…ๅ ฑใ‚’ไฝฟใฃใŸ้›ชๅฎณ่ปฝๆธ›\r\n\r\nๆพค็”ฐ\r\n\r\nใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟไบฌ้ƒฝๅฎŸ่ทตไผš\r\n\r\n้›ชๅฎณใฏ็ฝๅฎณใจใ—ใฆ่ช็Ÿฅใ•ใ‚Œใซใใ„็พ็ŠถใŒใ‚ใ‚‹ใ€‚\r\n\r\n[u-map](http://wiki.openstreetmap.org/wiki/JA:UMap) ใ‚’ไฝฟใฃใฆๅœฐๅŸŸ็‹ฌ่‡ชใฎ้›ชๅฎณๅฏพ็ญ–้˜ฒ็ฝใƒžใƒƒใƒ—ใฎไฝœ่ฃฝ\r\n[Mapillary](http://wiki.openstreetmap.org/wiki/JA:Mapillary) ใงๅ†™็œŸใ‚ขใƒƒใƒ—ใƒญใƒผใƒ‰\r\n[QGIS](http://qgis.org/ja/site/)\r\n\r\n้›ชๅฎณใ‚’ๅฐ‘ใ—ใงใ‚‚ๆธ›ใ‚‰ใ™ใŸใ‚ใซโ€ฆ ่ถŠๅพŒ้›ชใ‹ใ้“ๅ ด\r\n\r\n\r\n# Code for ้ธๆŒ™\r\n\r\nๅทไบ•\r\n\r\n็ฅžๆˆธใฏ้ธๆŒ™ใŒๅŒๆ™‚ๅคš็™บใงๅคงๅค‰๏ผ\r\n็ซ‹ๅ€™่ฃœ่€…ๆƒ…ๅ ฑใฏๅ…ฌๆ–‡ๆ›ธใชใฎใซใƒ‡ใƒผใ‚ฟใงๅ…ฌ้–‹ใ•ใ‚Œใฆใชใ„๏ผ\r\nWikidataใ€€ใงไฝœใฃใฆใพใ™ใ€€S๏ผˆไธป่ชž๏ผ‰P๏ผˆใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃ๏ผ‰O๏ผˆ็›ฎ็š„่ชž๏ผ‰ใง่จ˜่ฟฐใ—ใพใ™\r\nSPARQLใง่จ˜่ฟฐใ—ใฆใƒ‡ใƒผใ‚ฟๆŠฝๅ‡บใ—ใพใ™ใ€‚\r\n้ธๆŒ™ใซ่กŒใ“ใ†๏ผ\r\n\r\nhttps://speakerdeck.com/kwi/code-for-xuan-ju\r\n\r\n\r\n[https://speakerdeck.com/kwi/code-for-xuan-ju](https://speakerdeck.com/kwi/code-for-xuan-ju)\r\n\r\n# ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ๅ‚ๅŠ ่€…ๅ‹Ÿ้›†\r\n\r\n่ฅฟ่ฐท\r\n\r\nCOG ไปŠๅนดใ‚‚ๅ‡บใŸใ„ใจใ„ใ†่ฉฑ\r\n\r\n\r\n\r\n# Fw:ๆฑๅŒ—็ฌฌ6ๅ›žๅ…ฑๅ‰ตใ‚คใƒ™ใƒณใƒˆ ใ€Žใ‚นใƒใƒผใƒ„ใ‚’้€šใ˜ใฆใ€ใƒ€ใ‚คใƒใƒผใ‚ทใƒ†ใ‚ฃใ‚’ๅฎŸ็พใ™ใ‚‹ใพใกใฅใใ‚Šใ€(10ๆœˆ28-29ๆ—ฅin็ฅžๆˆธ)ๅ‘Š็Ÿฅ\r\n\r\n็‰ง\r\n\r\nFW:ๆฑๅŒ—\r\n้™ธๅ‰้ซ˜็”ฐๅธ‚ใ‚’ใƒ€ใ‚คใƒใƒผใ‚ทใƒ†ใ‚ฃใฎๅ…ฑๅ‰ตใง็››ใ‚ŠไธŠใ’ใ‚‹๏ผ\r\n้šœๅฎณ่€…ใ‚นใƒใƒผใƒ„ใฎใƒกใƒƒใ‚ซใซใ™ใ‚‹ใŸใ‚ใฎใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณใ‚’็ฅžๆˆธใง้–‹ๅ‚ฌ\r\n\r\nFw:ๆฑๅŒ—ใ‚คใƒกใƒผใ‚ธใ‚ญใƒฃใƒฉใ‚ฏใ‚ฟใƒผใฎใ‚“๏ผˆ่ƒฝๅนด็Žฒๅฅˆ๏ผ‰ใ•ใ‚“ใŒใใ‚‹ใจใใฏใŠ็Ÿฅใ‚‰ใ›ใ•ใ‚Œใพใ™ใ€‚\r\nๅ‚ๅŠ ่€…ๅคงๅ‹Ÿ้›†ไธญ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ\r\n\r\n# ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใฎๆบๆตใ‚’ๆŽขใ‚‹ใƒผๆ—ฅๆœฌใฎๅœฐๅŸŸๆƒ…ๅ ฑๅŒ–ใฎ็ณป่ญœ\r\n\r\nๅ’ŒๅดŽ\r\nๅœฐๅŸŸSNSๅ…จๅ›ฝใƒ•ใ‚ฉใƒผใƒฉใƒ ใฎ็†ฑใ‚’ๆ€ใ„ๅ‡บใ™ใ€‚\r\nใ‚ฐใƒฉใƒฌใ‚ณใ„ใ„ใงใ™ใญ๏ผ\r\nๆ—ฅๆœฌใฏใ‚‚ใจใ‚‚ใจGive & Giveใฎๅ›ฝใ 
ใฃใŸใ€‚\r\n็พๅœจใฏๆƒ…ๅ ฑๅŒ–ใฎ็ฌฌ4ๆœŸ\r\nใƒญใƒผใ‚ซใƒซโ‡จใ‚ฐใƒญใƒผใƒใƒซโ‡จใƒญใƒผใ‚ซใƒซใซๅ›žๅธฐใ™ใ‚‹๏ผŸ๏ผšใ‚ฐใƒญใƒผใƒใƒซใงใฏๅพ—ใ‚‰ใ‚Œใชใ„ๅนธ็ฆใ‚’ๆฑ‚ใ‚ใฆ\r\n\r\n1990ๅนดไปฃๅพŒๅŠใซใ€ใ€Œใƒใƒƒใƒˆใƒ‡ใ‚คใ€ใงๅœฐๅŸŸใฎใฟใ‚“ใชใŒใŠ้‡‘ใจๆŠ€่ก“ใ‚’ๆŒใกๅฏ„ใฃใฆๅญฆๆ กใซๅ›ž็ทšใ‚’ใฒใ„ใŸ๏ผ\r\nๅœฐๅŸŸSNSใ‚’ๆดป็”จใ—ใŸๆง˜ใ€…ใชๆดปๅ‹•ใŒใ‚ใฃใŸ๏ผ\r\n็พๅœจใฎCode for ใซ็น‹ใŒใฃใฆใ‚‹\r\n\r\nใ‚จใƒณใƒ‘ใƒฏใƒผใƒกใƒณใƒˆ(E)๏ผๆ‰่ƒฝ(M)ร—็น‹ใŒใ‚Š(C)ใฎ2ไน—\r\n\r\nๅผ•ใไธŠใ’ใฆใใ‚Œใ‚‹ไบบใŒใ„ใŸใ‹ใ‚‰ใ€ๅœฐๅŸŸๆƒ…ๅ ฑๅŒ–ใ‚’ใ™ใ™ใ‚ใ‚‹ใ“ใจใŒใงใใŸใ€‚\r\n\r\n" }, { "alpha_fraction": 0.6726078987121582, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 17.72222137451172, "blob_id": "8cb47762f8efdb289dfb7a4bcf7eb996b15c8b5e", "content_id": "49e65841f2e0639d4f39463b9ee35ccff5d429d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3732, "license_type": "no_license", "max_line_length": 99, "num_lines": 108, "path": "/_posts/2017-11-16-meeting33.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš33rd\r\ndate: 2017-11-16 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)CfKใฎ่กŒๅ‹•่ฆ็ฏ„ใฏใ˜ใ‚ใพใ—ใŸ(่ฅฟ่ฐท)5ๅˆ†\r\n (2)้›ฒๅ—ๅธ‚็ ”ไฟฎๆ—…่กŒๅ ฑๅ‘Š(่กŒๆ”ฟใ‚บ)30ๅˆ†\r\n (3)Urban Innovation Kobeใฎไฝ•ใŒใ—ใ‹(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)15ๅˆ†\r\n (4)FOODPICTใƒ”ใƒƒใƒ(่Šๆฑ )5ๅˆ†\r\n (5)ๆฌกๅ›žใฏๅฟ˜ๅนดไผš๏ผ†CfK็ฅ3ๆญณใ€ใใฎๆฌกใฏๆ–ฐๅนดไผšใงใ™ใ‚ˆใƒผ(่ฅฟ่ฐท)5ๅˆ†\r\n (6)ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—(COGๅ‚ๅŠ ่€…)60ๅˆ†โ€ปCOGๅ‚ๅŠ ่€…ใฎใฟ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1931881857086373/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/r1cPf1oJf)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=3446)]\r\n\r\n\r\n# Code for 
Kobeๅฎšไพ‹ไผš33rd\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ” <http://www.kigyoplaza-hyogo.jp/>\r\n\r\n2.ใŠๅ“ๆ›ธใ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (1)CfKใฎ่กŒๅ‹•่ฆ็ฏ„ใฏใ˜ใ‚ใพใ—ใŸ(่ฅฟ่ฐท)5ๅˆ†\r\n- (2)้›ฒๅ—ๅธ‚็ ”ไฟฎๆ—…่กŒๅ ฑๅ‘Š(่กŒๆ”ฟใ‚บ)30ๅˆ†\r\n- (3)Urban Innovation Kobeใฎไฝ•ใŒใ—ใ‹(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)15ๅˆ†\r\n- (4)FOODPICTใƒ”ใƒƒใƒ(่Šๆฑ )5ๅˆ†\r\n- (5)ๆฌกๅ›žใฏๅฟ˜ๅนดไผš๏ผ†CfK็ฅ3ๆญณใ€ใใฎๆฌกใฏๆ–ฐๅนดไผšใงใ™ใ‚ˆใƒผ(่ฅฟ่ฐท)5ๅˆ†\r\n- (6)ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—(COGๅ‚ๅŠ ่€…)60ๅˆ†โ€ปCOGๅ‚ๅŠ ่€…ใฎใฟ\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n\r\n## ่กŒๅ‹•่ฆ็ฏ„ใคใใ‚Šใพใ—ใŸ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nCode for America ใŒๅ…ƒใซใชใฃใฆใ„ใ‚‹ใ‚‚ใฎใ‚’ไฝฟใฃใฆใ„ใพใ™ใ€‚\r\n\r\n## ้›ฒๅ—ใƒใƒฃใƒฌใƒณใ‚ธ่ฆ‹ใฆใใพใ—ใŸ\r\n็ญ’ไบ•ใ•ใ‚“\r\n\r\n็ตŒ็ทฏ๏ผšๅŽปๅนดใฎไปŠ้ ƒใ€ๆฆŠๅŽŸใ•ใ‚“ใŒ้›ฒๅ—ๅธ‚ใ‚’่จชใ‚Œใฆใ€่กŒๆ”ฟใจไฝๆฐ‘ใŒๅ††ๅ“ใง่‡ชๆฒปใ‚’ๅฎŸๆ–ฝใ—ใฆใ„ใ‚‹ใฎใ‚’่ฆ‹ใฆใใฆใ„ใŸใ€‚ใใฎ่ฉฑใŒใ€Œ่กŒๆ”ฟใ‚บใ€ใฎใƒกใƒณใƒใƒผใซๅฑ•้–‹ใ•ใ‚Œใ€ใ„ใ‚ใ„ใ‚ใ‚ใฃใŸๆœซใซใ€่จชๅ•ใ—ใฆใใพใ—ใŸใ€ใจใ„ใ†ใฎใŒ็ฐกๅ˜ใช็ตŒ็ทฏใ€‚\r\n\r\n่ฅฟ่ฐทใ•ใ‚“ใ€้ซ˜ๆฉ‹ใ•ใ‚“ใ€็ญ’ไบ•ใ•ใ‚“ใ€ใชใฉใชใฉใฎใƒกใƒณใƒใƒผใง้›ฒๅ—ๅธ‚่ฆ–ๅฏŸใ—ใฆใใพใ—ใŸใ€‚\r\n\r\n่ชฒ้กŒๅ…ˆ้€ฒๅœฐๅŸŸใจๅ‘ผใฐใ‚Œใฆใ„ใ‚‹ใ€‚ใใ‚Œใฏ RESAS ใง่ฆ‹ใฆใ€้ซ˜้ฝขๅŒ–็އใŒ็พๆ™‚็‚นใงๆ—ขใซ้ซ˜ใ„๏ผˆ36%๏ผ‰ใ€‚ใ“ใ‚Œใฏใกใ‚‡ใ†ใฉ็ฅžๆˆธใฎ 2040 ๅนดใงใฎๆ•ฐๅ€คใจๅŒใ˜ใ€‚่ถ…้ซ˜้ฝขๅฑ€้ขใŒ้›ฒๅ—ๅธ‚ใซ็พใ‚Œใฆใ„ใ‚‹ใ€‚\r\n\r\nๆ—ขใซๆง˜ใ€…ใช่ฆ–ๅฏŸใƒฌใƒใƒผใƒˆใŒใ‚ใ‚‹ใฎใงใ€google ใงๆคœ็ดขใ™ใ‚‹ใจ่ฆ‹ใคใ‹ใ‚‹ใ€‚\r\n\r\nๅ†…้–ฃๅบœใฎใ€Œๅฐใ•ใชๆ‹ ็‚นใ‚’ๆดปใ‹ใ—ใŸ๏ฝžใ€ใฎ่ณ‡ๆ–™\r\n<http://www.cao.go.jp/regional_management/>\r\n\r\nใใ“ใงใ€Œ้›ฒๅ—ใƒใƒฃใƒฌใƒณใ‚ธใ€\r\n<http://www.co-unnan.jp/>\r\n- ใ“ใฉใ‚‚ใƒใƒฃใƒฌใƒณใ‚ธ\r\n- ่‹ฅ่€…ใƒใƒฃใƒฌใƒณใ‚ธ๏ผšไป•ไบ‹ใ‚’ไฝœใฃใฆใ„ใ“ใ†\r\n- 
ๅคงไบบใƒใƒฃใƒฌใƒณใ‚ธ๏ผšไปŠใ‚ใ‚‹่ชฒ้กŒใ‚’่งฃๆฑบใ—ใ‚ˆใ†\r\n\r\nใใ‚Œใžใ‚Œใฎๆดปๅ‹•ๆˆๆžœใ‚’็™บ่กจใ™ใ‚‹ใ‚คใƒ™ใƒณใƒˆใ€ŒใคใชใŒใ‚‹้›ฒๅ—ใƒใƒฃใƒฌใƒณใ‚ธ 2017ใ€\r\n\r\n\r\n## Urban Innovation Kobeใฎไฝ•ใŒใ—ใ‹\r\n่–ฌๅธซๅฏบใ•ใ‚“\r\n\r\nentry ใ—ใฆ้ธ่€ƒ้€ฒใ‚“ใงใ„ใพใ™ใ€‚\r\n\r\n\r\n## FOODPICTใƒ”ใƒƒใƒ\r\n่Šๆฑ ใ•ใ‚“\r\n\r\n<https://www.foodpict.com/>\r\n\r\nKobe global startup gateway ใซๅ‚ๅŠ ใ—ใฆใ„ใ‚‹ใ€‚\r\n\r\nไปŠใพใงใฎ foodpict ใ‚’ใƒ‡ใ‚ธใ‚ฟใƒซใงๆ‹กๅผตใ™ใ‚‹ใ“ใจใ‚’่€ƒใˆใฆใ„ใ‚‹ใ€‚\r\nAI ใซใ‚ˆใ‚‹ๅˆคๅฎšใ‚’ไฝต็”จใ™ใ‚‹ๆ–นๆณ•ใ‚’่€ƒใˆใฆใ„ใ‚‹ใ€‚\r\n<https://azure.microsoft.com/ja-jp/services/cognitive-services/custom-vision-service/>\r\n\r\ntwitter: @NOBU_KIKUCHI\r\nfacebook: nobuktaka.kikuchi.31\r\n\r\n\r\n## ็ฅžๆˆธ Code for Japan ใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผใ‚ทใƒƒใƒ—\r\n\r\nๆปๆ‘ใ•ใ‚“\r\n้ซ˜ๅฒธใ•ใ‚“\r\n\r\n<http://www.city.kobe.lg.jp/information/press/2017/10/20171031041901.html>\r\n\r\n- urban innovation kobe\r\n- 500 kobe accelerator\r\n- silicon valley ๆดพ้ฃ\r\n\r\n\r\n## ๆฌกๅ›žใฏๅฟ˜ๅนดไผš๏ผ†CfK็ฅ3ๆญณใ€ใใฎๆฌกใฏๆ–ฐๅนดไผšใงใ™ใ‚ˆใƒผ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\n\r\n\r\n## ใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\nCOGๅ‚ๅŠ ่€…\r\n\r\n" }, { "alpha_fraction": 0.6251882910728455, "alphanum_fraction": 0.7047303318977356, "avg_line_length": 16.11475372314453, "blob_id": "d980ad79d1c57a068261a52ed5e475a64b1fedee", "content_id": "7324593aca00e414c70d1f8d9c38609bf34e60a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5819, "license_type": "no_license", "max_line_length": 149, "num_lines": 183, "path": "/_posts/2018-04-19-meeting38.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš38th\r\ndate: 2018-04-19 19:00:00 +09:00\r\nauthor: ๅ‚ๅŠ ่€…\r\ncomments: true\r\ndescription: |\r\n (1)็‹ฌ็ซ‹ๅ ฑๅ‘Š(่ฅฟ่ฐท)10ๅˆ†\r\n (2)Social Hack Dayๅ‘Š็Ÿฅ(่ฅฟ่ฐท)5ๅˆ†\r\n 
(3)็ฅžๆˆธใ‚ฝใƒผใ‚ทใƒฃใƒซใƒ–ใƒชใƒƒใ‚ธๅ‘Š็Ÿฅ(ๆฐธ้‡Ž)15ๅˆ†\r\n (4)078ๅ‘Š็Ÿฅ(้•ทไบ•)10ๅˆ†\r\n (5)ใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃใฎ็ฅญๅ…ธ2018ๅ‘Š็Ÿฅ(ๆฟๅžฃ)5ๅˆ†\r\n (6)ใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒปใƒŸใƒ‹ใ‚ญใƒฃใƒณใƒ—inๅ…ตๅบซ2018ๅ‘Š็Ÿฅ(็Ÿณๆฉ‹)5ๅˆ†\r\n (7)Sentilo็š„ใช่ฉฑ(ๅทไบ•)15ๅˆ†\r\n (8)ไบฎๅนณๆพๆ‘ใฎใพใ ่จ€ใˆใชใ„่ฉฑ(ๆพๆ‘)30ๅˆ†ใ€œ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/885822544934490/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/HyRxLyL3G)\r\n/ Links:\r\n\r\n# Code for Kobe 38th\r\n\r\nhttps://www.facebook.com/events/885822544934490/\r\n\r\nITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ŒCode for Kobeใ€ใฎ็ฌฌ38ๅ›žๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผๅˆใ‚ใพใ—ใฆใฎๆ–นใ‚‚ใฉใชใŸใงใ‚‚ๅ‚ๅŠ ๆญ“่ฟŽ๏ผ\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n(http://www.kigyoplaza-hyogo.jp/)\r\nโ€ป19ๆ™‚ไปฅ้™ๅ…ฅ้คจ็ตŒ่ทฏใŒ่ค‡้›‘ใซใชใ‚Šใพใ™๏ผ้…ๅˆปใ•ใ‚Œใ‚‹ๆ–นใฏใƒกใƒƒใ‚ปใƒผใ‚ธ็ญ‰ใงใŠ็Ÿฅใ‚‰ใ›ใใ ใ•ใ„ใ€‚\r\n\r\n2.ใŠๅ“ๆ›ธใ(ๆ•ฌ็งฐ็•ฅ)\r\nใ€œไนพๆฏใƒปCoC่ชฌๆ˜Žใ€œ\r\n(1)็‹ฌ็ซ‹ๅ ฑๅ‘Š(่ฅฟ่ฐท)10ๅˆ†\r\n(2)Social Hack Dayๅ‘Š็Ÿฅ(่ฅฟ่ฐท)5ๅˆ†\r\n(3)็ฅžๆˆธใ‚ฝใƒผใ‚ทใƒฃใƒซใƒ–ใƒชใƒƒใ‚ธๅ‘Š็Ÿฅ(ๆฐธ้‡Ž)15ๅˆ†\r\n(4)078ๅ‘Š็Ÿฅ(้•ทไบ•)10ๅˆ†\r\n(5)ใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃใฎ็ฅญๅ…ธ2018ๅ‘Š็Ÿฅ(ๆฟๅžฃ)5ๅˆ†\r\n(6)ใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒปใƒŸใƒ‹ใ‚ญใƒฃใƒณใƒ—inๅ…ตๅบซ2018ๅ‘Š็Ÿฅ(็Ÿณๆฉ‹)5ๅˆ†\r\n(7)Sentilo็š„ใช่ฉฑ(ๅทไบ•)15ๅˆ†\r\n(8)ไบฎๅนณๆพๆ‘ใฎใพใ ่จ€ใˆใชใ„่ฉฑ(ๆพๆ‘)30ๅˆ†ใ€œ\r\nโ€ปใ”ๅธŒๆœ›ใ‚ใ‚Œใฐ้šๆ™‚ๅ—ใ‘ไป˜ใ‘ใพใ™๏ผ\r\nใ€œไบคๆตใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\n1,000ๅ††(ๅญฆ็”Ÿไปฅไธ‹็„กๆ–™)\r\n\r\n\r\n[่กŒๅ‹•่ฆ็ฏ„](https://github.com/codeforjapan/codeofconduct/blob/master/README.md) ใฎ็ดนไป‹ใ‚’ใ—ใพใ—ใŸ\r\n\r\n## ็‹ฌ็ซ‹ๅ ฑๅ‘Š\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\n- COOP ใ“ใ†ใน ใ‚’้€€่ทใ—ใพใ—ใŸใ€‚\r\n- ๅ€‹ไบบไบ‹ๆฅญไธปใ€Œ็ฅžๆˆธใƒ‹ใ‚ทใ‚ฟใƒ‹ไผ็”ปใ€\r\n - ใจใ‚Šใ‚„ใพใ•ใ‚“็™บๆกˆ\r\n - ็”ปๆ•ฐใŒ่‰ฏใ‹ใฃใŸ \r\n - ใƒญใ‚ด\r\n - $12ใใ‚‰ใ„ใฎๅˆถไฝœใ‚ตใ‚คใƒˆใงไฝœๆˆ\r\n - 
ใ€Œใ‚ใ‹ใ‚‹ใ“ใจใ€ใฎๆฎต้šŽใฎใ‚คใƒกใƒผใ‚ธ\r\n- 078ใซ็™ปๅฃ‡ใ—ใพใ™\r\n- ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏ x ๅ”ๅŒ็ต„ๅˆ\r\n - ๅ”ๅŒ็ต„ๅˆใซๆ‰€ๅฑžใ—ใฆใ„ใฆใ€ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใ‚’ใ‚„ใฃใฆใ„ใ‚‹ไบบใ‚’ใ‚ใพใ‚Šใฟใชใ„\r\n - ใƒ†ใƒƒใ‚ฏใฎใƒฌใƒ™ใƒซใ‚‚ๅ†…ๅค–ใงๅทฎใŒๅคงใใ„\r\n- ใ‚„ใ‚ใ†ใจๆ€ใฃใฆใ„ใ‚‹ใจใ“ใ‚\r\n - ใƒ‡ใƒผใ‚ฟใƒ˜ใƒซใ‚น\r\n - ใ‚‚ใฃใจ่ตทๆฅญใ—ใ‚„ใ™ใ„็คพไผšใธ\r\n\r\n## Social Hack Dayๅ‘Š็Ÿฅ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nhttps://www.facebook.com/events/210811293016202/\r\n\r\nไฝ•ใ‹ใ‚„ใ‚ŠใŸใ„ไบบใ€ใ‚„ใฃใฆใ„ใ‚‹ไบบใŒใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใ‚’ๆŒใก่พผใ‚“ใงใ€ไปฒ้–“ใ‚’ๅ‹Ÿใ‚Šใ€ใฟใ‚“ใชใงๆ‰‹ใ‚’ๅ‹•ใ‹ใ—ใชใŒใ‚‰ใ‚ตใƒผใƒ“ใ‚นใ‚’ใคใใ‚Šใ‚ใ’ใ‚‹ One day ใƒใƒƒใ‚ซใ‚ฝใƒณใงใ™ใ€‚\r\n\r\n## ็ฅžๆˆธใ‚ฝใƒผใ‚ทใƒฃใƒซใƒ–ใƒชใƒƒใ‚ธๅ‘Š็Ÿฅ\r\nๆฐธ้‡Ž ็ด”ไธ€้ƒŽใ•ใ‚“\r\n- Web ใƒ‡ใ‚ฃใƒฌใ‚ฏใ‚ฟใƒผ\r\n- 2013 ๅนดใ‹ใ‚‰ใƒ—ใƒญใƒœใƒŽ\r\n- 2017 NPO Service Grant ่ทๅ“กใ‚‚\r\n\r\nhttp://www.city.kobe.lg.jp/ward/activate/participate/platform/bridge.html\r\n\r\nไฝ•ใ‚’่ตทใ“ใ—ใŸใ„ใฎใ‹๏ผŸใใฎใŸใ‚ใซไฝ•ใ‚’ใ™ใ‚‹ใฎใ‹๏ผŸ\r\n- ๅœฐๅŸŸ็คพไผšใฎ่ชฒ้กŒใซๅ‘ใๅˆใˆใ‚‹่ก—ใซใ—ใŸใ„\r\n- ็คพไผš่ฒข็Œฎๆดปๅ‹•ใซ่ˆˆๅ‘ณใŒใ‚ใ‚‹๏ผใ ใ‘ใฉไธ€ๆญฉ่ธใฟๅ‡บใ›ใชใ„\r\n- ๅพช็’ฐใ•ใ›ใฆๅบƒใŒใ‚Šใ‚’ใ‚‚ใŸใ›ใ‚ˆใ†ใ€‚ๆฌกใฎใ‚ตใ‚คใ‚ฏใƒซใ‚’ๅฎŸๆ–ฝใ™ใ‚‹\r\n - ใ‚ปใƒŸใƒŠใƒผ้–‹ๅ‚ฌ\r\n - ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆๅฎŸๆ–ฝ๏ผˆ1 week ใƒˆใƒฉใ‚คใ‚ขใƒซ๏ผ‰\r\n - ใ‚นใƒ†ใƒƒใƒ—ใ‚ขใƒƒใƒ—ใƒใƒฃใƒฌใƒณใ‚ธ\r\n - ๆดปๅ‹•ๅ ฑๅ‘Šไผš\r\n\r\nใพใšใฏ่ชฌๆ˜Žไผšใธใ”ๅ‚ๅŠ ใใ ใ•ใ„\r\n- 4/20, 22, 5/9, 13, 23, 27\r\n- http://bit.ly/ksb_setsumei\r\n\r\n078KOBE AREA3 14:50-15:40ใ€Œใ‚ฏใƒชใ‚จใ‚คใ‚ฟใƒผใƒปใƒ‡ใ‚ถใ‚คใƒŠใƒผใฎใŸใ‚ใฎใƒ—ใƒญใƒœใƒŽๅ…ฅ้–€ใ€\r\n\r\n## 078ๅ‘Š็Ÿฅ\r\n้•ทไบ•ใ•ใ‚“\r\n\r\n078KOBE interactive ใฎ่ฒฌไปป่€…ใ‚„ใฃใฆใพใ™ใ€‚\r\nhttp://078kobe.jp/about/\r\n็ฅžๆˆธใงใฏใ˜ใพใฃใŸใ‚ฏใƒญใ‚นใƒกใƒ‡ใ‚ฃใ‚ขใ‚คใƒ™ใƒณใƒˆใจใ—ใฆใ‚„ใ‚Šใพใ™ใ€‚\r\nSXSW ใ‚’ใƒขใƒ‡ใƒซใซใ—ใฆใ„ใพใ™ใ€‚\r\n\r\nๅ‘Š็Ÿฅๆ‹กๆ•ฃใŠ้ก˜ใ„ใ—ใพใ™๏ผ\r\n\r\n\r\n## ใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃใฎ็ฅญๅ…ธ2018ๅ‘Š็Ÿฅ\r\nๆฟๅžฃ 
ๅฎๆ˜Žใ•ใ‚“\r\nNPOๆณ•ไบบใ‚ขใ‚คใƒปใ‚ณใƒฉใƒœใƒฌใƒผใ‚ทใƒงใƒณ็ฅžๆˆธ\r\n\r\nhttp://accfes.com/\r\n็งใŸใกใฎๆœชๆฅใ‚’ใคใใ‚‹ใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃ\r\n\r\nใƒ›ใƒผใƒ ใƒšใƒผใ‚ธๅˆถไฝœใƒปใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃใ‚’้‡็‚น็š„ใซใ‚„ใฃใฆใ„ใ‚‹\r\n\r\nใ€Œใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ‡ใ‚ถใ‚คใƒณใฎโ—‹โ—‹ใ€ใจใ„ใ†ใ‚‚ใฎใฏๆ„ๅค–ใจไฝฟใ„ใซใใ‹ใฃใŸใ‚Šใ™ใ‚‹ใ€‚\r\n\r\nใ‚คใƒ™ใƒณใƒˆใฎๆ—ฅ็จ‹ใฏ http://globalaccessibilityawarenessday.org/ ใฎๅ•“็™บใฎๆ—ฅใจใ—ใฆ่จญๅฎšใ•ใ‚Œใฆใ„ใ‚‹ใ€‚\r\n\r\n\r\n## ใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒปใƒŸใƒ‹ใ‚ญใƒฃใƒณใƒ—inๅ…ตๅบซ2018ๅ‘Š็Ÿฅ\r\n็Ÿณๆฉ‹ใ•ใ‚“\r\n\r\n[ใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒปใƒŸใƒ‹ใ‚ญใƒฃใƒณใƒ— in ๅ…ตๅบซ 2018](http://www.security-camp.org/minicamp/hyogo2018.html)\r\n\r\n\r\n## Sentilo็š„ใช่ฉฑ\r\nๅทไบ•ใ•ใ‚“\r\n\r\n<script async class=\"speakerdeck-embed\" data-id=\"c86eb1e3dfeb4d6db211a2ae4ec8a269\" data-ratio=\"1.6\" src=\"//speakerdeck.com/assets/embed.js\"></script>\r\n\r\nๆธฌใฃใฆใฟใ‚ˆใ†ใ‚ˆ\r\n\r\n### ๅ…ˆ่กŒไบ‹ไพ‹\r\n\r\n- ใƒใƒซใ‚ปใƒญใƒŠใฎ็’ฐๅขƒใ‚ปใƒณใ‚ตใƒผ็ถฒใƒปSentilo\r\n ใƒ‡ใƒผใ‚ฟใŒไธ€่ˆฌๅ…ฌ้–‹ใ•ใ‚Œใฆใ„ใ‚‹\r\n\r\n- ็ฅžๆˆธใ‚นใƒžใƒผใƒˆใƒฉใƒณใƒ‹ใƒณใ‚ฐใ‚ตใƒผใƒ“ใ‚น ็ฅžๆˆธๅธ‚๏ฝ˜ใ‚ขใ‚ทใƒƒใ‚ฏใ‚น\r\n ใƒฉใƒณใƒŠใƒผๅ‘ใ‘ใฎใƒฉใƒณใƒ‹ใƒณใ‚ฐใƒญใ‚ฐใ‚ตใƒผใƒ“ใ‚น\r\n\r\n\r\n### ่ฉฆใฟ\r\n\r\n็ฅžๆˆธใง่ตฐใ‚‹ใจ็ฉบๆฐ—ๆฑšใ„๏ผŸ\r\n\r\nๆธฌใฃใฆใฟใŸ\r\n\r\nๆธฌๅฎšใ‚ญใƒƒใƒˆใ‚’ไฝœใฃใŸ\r\n- No๏ผ’ๆฟƒๅบฆ\r\n- GPS\r\n- ๆธฉๅบฆใƒปๆนฟๅบฆใƒปๆฐ—ๅœง\r\n - ใ‚ปใƒณใ‚ตใฎๅ‹•ไฝœ็’ฐๅขƒ\r\n- Grove IoTใ‚นใ‚ฟใƒผใ‚ฟใƒผใ‚ญใƒƒใƒˆ for SORACOM\r\n- etc\r\n\r\nใฟใ‚“ใชใ‚„ใ‚Šใพใ›ใ‚“ใ‹\r\n\r\n## scratch workshop\r\nๅ €็”ฐใ•ใ‚“\r\n\r\nๅคง้˜ชๅคงๅญฆใ€Œใ„ใกใ‚‡ใ†็ฅญใ‚Šใ€ใง scratch ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—้–‹ๅ‚ฌ\r\nhttp://kyokuri.org/scratchday2018/\r\n\r\n\r\n## ไบฎๅนณๆพๆ‘ใฎUrban Innovation Kobeใช่ฉฑ\r\nๆพๆ‘ใ•ใ‚“\r\n\r\nใ€Œurban innovation kobeใ€\r\n\r\n2018 ๅนดๅบฆใฏ 4 ใคใฎใƒ†ใƒผใƒž 8 ใคใฎ่ชฒ้กŒ\r\n\r\nspecial workshop in 078\r\n- 2018-04-27 12:30 -- 13:50 KIITO 303\r\n- https://uik2018-078.peatix.com/\r\n\r\n\r\n" }, { "alpha_fraction": 0.8433048725128174, 
"alphanum_fraction": 0.8494098782539368, "avg_line_length": 51.41304397583008, "blob_id": "1b2e5985371ad09d1f8c99dead3f7d1ab71a7b51", "content_id": "b4acfefb0952c2744106afd36193352494f921f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6333, "license_type": "no_license", "max_line_length": 296, "num_lines": 46, "path": "/CODE_OF_CONDUCT.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: default\r\n---\r\n# Code for Kobe ใฎ่กŒๅ‹•่ฆ็ฏ„\r\n\r\nCode for Kobe ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒกใƒณใƒใƒผใฏใ€Code for ใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใฎ่กŒๅ‹•ใ€ใ‚คใƒ™ใƒณใƒˆใ€ใƒ‡ใ‚ธใ‚ฟใƒซใƒ•ใ‚ฉใƒผใƒฉใƒ ใชใฉใซใŠใ„ใฆใ€ไปฅไธ‹ใฎ็ŠถๆณใŒไฟ้šœใ•ใ‚Œใ‚‹ใ“ใจใ‚’ๆœŸๅพ…ใ—ใพใ™ใ€‚\r\n\r\n1. ๅ…จใฆใฎๅ‚ๅŠ ่€…ใซๅฏพใ—ๅฎ‰ๅ…จใงใ€ๆ•ฌๆ„ใŒ็คบใ•ใ‚ŒใŸ็’ฐๅขƒใงใ™\r\n2. ไบบใ€…ใŒ่‡ช็”ฑใ‹ใคๅฎŒๅ…จใซๅ€‹ไบบใฎๅ€‹ๆ€งใ‚’่กจ็พใ™ใ‚‹ใ“ใจใŒใงใใ‚‹ๅ ดๆ‰€ใงใ™\r\n3. ๅ…จใฆใฎไบบใฎใ‚ขใ‚คใƒ‡ใ‚ขใ€ใ‚นใ‚ญใƒซใ€่ฒข็Œฎใซไพกๅ€คใŒใ‚ใ‚‹ใจใ“ใจใŒๅ‰ๆใจใชใฃใฆใ„ใพใ™\r\n4. ๅ…จใฆใฎไบบใ€…ใŒๅŒใ˜ๆ–‡่„ˆใ‚’ๅ…ฑๆœ‰ใ—ใฆใ„ใ‚‹ใจ่€ƒใˆใšใ€่ณชๅ•ใ‚’ๅฅจๅŠฑใ—ใพใ™\r\n5. ไบบใ€…ใŒ๏ผˆๆŠ€่ก“็š„ใ‹ใใ†ใงใชใ„ใ‹ใซ้–ขใ‚ใ‚‰ใš๏ผ‰่ƒฝๅŠ›ใ‚„็†ฑๆ„ใ‚’ๆŒใฃใฆ็”Ÿ็”ฃ็š„ใซใชใ‚‹ๆ–นๆณ•ใ‚’ๆŽขใ—ใพใ™ใ€‚ใ€Œใ„ใ„ใˆ/ใ—ใ‹ใ—ใ€(no/but)ใงใฏใชใใ€ใ€Œใฏใ„/ใ•ใ‚‰ใซใ€(yes/and)ใจใ„ใ†่จ€่‘‰ใ‚’ไฝฟใ„ใพใ™ใ€‚\r\n6. ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒกใƒณใƒใƒผใ‚„ๅ‚ๅŠ ่€…ใซใ€่ฉฑใ™ใ“ใจใ‚ˆใ‚Šใ‚‚่žใใ“ใจใฎๆ–นใ‚’ๅฅจๅŠฑใ—ใพใ™ใ€‚\r\n7. ่ชฐใ‚‚ใŒๅˆฉ็”จใงใใ‚‹ใ‚ˆใ†ใ€ใ‚ชใƒผใƒ—ใƒณใง่‡ช็”ฑใชๆŠ€่ก“ใ‚’ไฝฟใฃใŸใƒ„ใƒผใƒซใ‚’ไฝœใ‚ŠไธŠใ’ใ‚‹ใ“ใจใ‚’ๅŠชๅŠ›ใ—ใพใ™ใ€‚ๅ€‹ไบบ็š„ใชๅˆฉๅพ—ใงใฏใชใใ€ๅ…ฌๅ…ฑใฎไพฟ็›Šใ‚’ไฟƒ้€ฒใ™ใ‚‹ๆดปๅ‹•ใŒๅ„ชๅ…ˆใ•ใ‚Œใพใ™ใ€‚\r\n8. ๅธ‚ๆฐ‘ๅ‚็”ปใซใŠใ„ใฆไผ็ตฑ็š„ใซ้™คๅค–ใ•ใ‚ŒใฆใใŸใ‚ˆใ†ใชไบบใ€…ใธใฎใ‚ขใ‚ฏใ‚ปใ‚นใ‚„ใ€ๅฝผใ‚‰ใ‹ใ‚‰ใฎใ‚คใƒณใƒ—ใƒƒใƒˆ้‡่ฆ–ใ—ใพใ™ใ€‚\r\n9. 
ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใŒใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใฎ่จˆ็”ปใ‚„่จญ่จˆใ€ๅฎŸ่ฃ…ใชใฉใ‚’ไปฃ่กจใ™ใ‚‹ใ‚ˆใ†ใซๅฐฝๅŠ›ใ—ใพใ™ใ€‚ใ“ใ‚Œใฏใ€ๅฅณๆ€งใ€ใƒžใ‚คใƒŽใƒชใƒ†ใ‚ฃใ€ไผ็ตฑ็š„ใซๆŽ’้™คใ•ใ‚ŒใฆใใŸใ‚ˆใ†ใชไบบใ€…ใฎๅ‚ๅŠ ใ‚’ๅฅจๅŠฑใ™ใ‚‹ใ“ใจใ‚‚ๅซใฟใพใ™ใ€‚\r\n10. ๆ„ๆ€ๆฑบๅฎšใƒ—ใƒญใ‚ปใ‚นใซใ€ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ‚ฐใƒซใƒผใƒ—ใ‚„ๅฐ‚้–€ๅฎถใ‚’็ฉๆฅต็š„ใซๅ‚ๅŠ ใ—ใฆใ‚‚ใ‚‰ใ„ใพใ™ใ€‚\r\n11. ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒกใƒณใƒใƒผใ€ๅœฐๆ–น่‡ชๆฒปไฝ“ใ‚นใ‚ฟใƒƒใƒ•ใ€ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒ‘ใƒผใƒˆใƒŠใƒผใจใฎ้–ขไฟ‚ใ‚„ไผš่ฉฑใŒๅฐŠ้‡ใ•ใ‚Œใ€ๅ‚ๅŠ ใ—ใ€็”Ÿ็”ฃๆ€งใ‚’็ถญๆŒใ™ใ‚‹ใ“ใจใ‚’็ขบๅฎŸใซใ—ใพใ™ใ€‚\r\n12. ๅทฎๅˆฅใ‚„ๅซŒใŒใ‚‰ใ›ใฎใชใ„็’ฐๅขƒใ‚’ๆไพ›ใ—ใพใ™ใ€‚ Code for Kobe ใฏใ€ใ“ใ‚Œใ‚‰ใฎใƒใƒชใ‚ทใƒผใซ้•ๅใ—ใŸไบบใฏ่ชฐใงใ‚ใ‚ŒCode for Kobe ใฎใƒใƒƒใƒˆใƒฏใƒผใ‚ฏๆดปๅ‹•ใ€ใ‚คใƒ™ใƒณใƒˆใ€ใŠใ‚ˆใณใƒ‡ใ‚ธใ‚ฟใƒซใƒ•ใ‚ฉใƒผใƒฉใƒ ใซๅ‚ๅŠ ใ—ใชใ„ใ‚ˆใ†ใซไพ้ ผใ™ใ‚‹ๆจฉๅˆฉใ‚’ไฟๆŒใ—ใพใ™ใ€‚\r\n\r\n## Code for Kobe ใฎใ‚ขใƒณใƒใƒใƒฉใ‚นใƒกใƒณใƒˆใƒใƒชใ‚ทใƒผ\r\n\r\nCode for Kobe ใฏใ€ๆ€งๅˆฅใ€ๆ€งๅŒไธ€ๆ€งใŠใ‚ˆใณ่กจ็พใ€ๆ€ง็š„ๆŒ‡ๅ‘ใ€่บซไฝ“็š„ๅค–่ฆ‹ใ€่บซไฝ“ใฎๅคงใใ•ใ€ไบบ็จฎใ€ๅนด้ฝขใ€ใพใŸใฏๅฎ—ๆ•™ใซใ‹ใ‹ใ‚ใ‚‰ใšใ€ใ™ในใฆใฎไบบใซใƒใƒฉใ‚นใƒกใƒณใƒˆใฎใชใ„็’ฐๅขƒใ‚’ๆไพ›ใ™ใ‚‹ใ“ใจใซๅฐ‚ๅฟตใ—ใฆใ„ใพใ™ใ€‚ ็งใŸใกใฏใ€ใ‚นใ‚ฟใƒƒใƒ•ใ€ใƒ—ใƒฌใ‚ผใƒณใ‚ฟใƒผใ€ใŠใ‚ˆใณๅ‚ๅŠ ่€…ใฎใ„ใ‹ใชใ‚‹ๅฝขใฎใƒใƒฉใ‚นใƒกใƒณใƒˆใ‚‚ๅฎน่ชใ—ใพใ›ใ‚“ใ€‚ ๆ€ง็š„่จ€่ชžใจ็”ปๅƒใฏใ€Code for Kobe ใฎใ‚คใƒ™ใƒณใƒˆใ‚„ใƒใƒƒใƒˆใƒฏใƒผใ‚ฏๆดปๅ‹•๏ผˆ้€š่ฉฑใ‚’ๅซใ‚€๏ผ‰ใซใฏ้ฉๅˆ‡ใงใฏใ‚ใ‚Šใพใ›ใ‚“ใ€‚ ใ“ใ‚Œใ‚‰ใฎใƒใƒชใ‚ทใƒผใซ้•ๅใ™ใ‚‹่€…ใฏใ€ใ‚คใƒ™ใƒณใƒˆไธปๅ‚ฌ่€…ใพใŸใฏใƒ•ใ‚ฉใƒผใƒฉใƒ ็ฎก็†่€…ใฎ่ฃ้‡ใงใ€Code for Kobe ใฎใƒใƒƒใƒˆใƒฏใƒผใ‚ฏๆดปๅ‹•ใ€ใ‚คใƒ™ใƒณใƒˆใ€ใŠใ‚ˆใณใƒ‡ใ‚ธใ‚ฟใƒซใƒ•ใ‚ฉใƒผใƒฉใƒ ใ‹ใ‚‰้€€ๅŽปใ•ใ›ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚\r\n\r\nใƒใƒฉใ‚นใƒกใƒณใƒˆใซใฏใ€ใ‚ธใ‚งใƒณใƒ€ใƒผใ€ๆ€งๅˆฅใ€ๆ€ง็š„ๆŒ‡ๅ‘ใ€้šœๅฎณใ€่บซไฝ“็š„ๅค–่ฆณใ€่บซไฝ“ใฎๅคงใใ•ใ€ไบบ็จฎใ€ๅฎ—ๆ•™ใซ้–ข้€ฃใ™ใ‚‹ๆ”ปๆ’ƒ็š„ใชๅฃ้ 
ญใพใŸใฏใƒ†ใ‚ญใ‚นใƒˆใซใ‚ˆใ‚‹่จ€ๅŠใŒๅซใพใ‚Œใพใ™ใŒใ€ใ“ใ‚Œใซ้™ๅฎšใ•ใ‚Œใพใ›ใ‚“ใ€‚ ๅ…ฌๅ…ฑ็ฉบ้–“ใซใŠใ‘ใ‚‹ๆ€ง็š„ใ‚คใƒกใƒผใ‚ธ; ๆ„ๅ›ณ็š„ใช่„…่ฟซใ€‚ ใ‚นใƒˆใƒผใ‚ซใƒผ; ใคใใพใจใ„; ใƒใƒฉใ‚นใƒกใƒณใƒˆใฎๅ†™็œŸใ‚„้Œฒ้Ÿณ; ไผš่ฉฑใ‚„ใใฎไป–ใฎๅ‡บๆฅไบ‹ใฎๆŒ็ถš็š„ใชไธญๆ–ญ; ไธ้ฉๅˆ‡ใช็‰ฉ็†็š„ๆŽฅ่งฆ; ไธๅฟซใชๆ€ง็š„ๆณจๆ„; ไธๅฝ“ใช้™คๅค–: ไบบใ‚’่ฆ‹ไธ‹ใ™ใ‚ˆใ†ใช่จ€่ชžใ‚„่กŒๅ‹•; ใชใฉใ‚‚ๅซใฟใพใ™ใ€‚\r\n\r\nๅ‚ๅŠ ่€…ใŒใƒใƒฉใ‚นใƒกใƒณใƒˆ่กŒ็‚บใ‚’่กŒใฃใŸๅ ดๅˆใ€ไธปๅ‚ฌ่€…ใฏใ€Code for Kobe ใฎใƒใƒƒใƒˆใƒฏใƒผใ‚ฏๆดปๅ‹•ใ€ใ‚คใƒ™ใƒณใƒˆใ€ใŠใ‚ˆใณใƒ‡ใ‚ธใ‚ฟใƒซใƒ•ใ‚ฉใƒผใƒฉใƒ ใ‹ใ‚‰ใฎๅŠ ๅฎณ่€…ใธใฎ่ญฆๅ‘Šใ‚„่ฟฝๆ”พใ‚’ๅซใ‚€ใ€้ฉๅˆ‡ใจใฟใชใ•ใ‚Œใ‚‹ๆŽช็ฝฎใ‚’่ฌ›ใ˜ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚\r\n\r\nใ‚ใชใŸใŒใƒใƒฉใ‚นใƒกใƒณใƒˆใ‚’ๅ—ใ‘ใฆใ„ใ‚‹ๅ ดๅˆใฏใ€่ชฐใ‹ใŒใƒใƒฉใ‚นใƒกใƒณใƒˆใ‚’ๅ—ใ‘ใฆใ„ใ‚‹ใ“ใจใซๆฐ—ใฅใ„ใŸใ‚Šใ€ใใฎไป–ใฎๆ‡ธๅฟตใŒใ‚ใ‚‹ๅ ดๅˆใฏใ€ใ™ใใซใ‚คใƒ™ใƒณใƒˆๆ‹…ๅฝ“่€…ใพใŸใฏใƒ•ใ‚ฉใƒผใƒฉใƒ ็ฎก็†่€…ใซ้€ฃ็ตกใ—ใฆใใ ใ•ใ„ใ€‚ [ใ‚คใƒ™ใƒณใƒˆไธปๅ‚ฌ่€…/ใƒ•ใ‚ฉใƒผใƒฉใƒ ็ฎก็†่€…ใฎEใƒกใƒผใƒซใ‚ขใƒ‰ใƒฌใ‚นใจ้›ป่ฉฑ็•ชๅท] ใ‹ใ‚‰้€ฃ็ตกใ™ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ ใ‚คใƒ™ใƒณใƒˆใ‚นใ‚ฟใƒƒใƒ•ใพใŸใฏใƒ•ใ‚ฉใƒผใƒฉใƒ ็ฎก็†่€…ใฏใ€ๅ‚ๅŠ ่€…ใŒๆ–ฝ่จญ/ไผšๅ ดใฎใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใ‚„้–ข้€ฃใฎๆณ•ไปคๅŸท่กŒๆฉŸ้–ขใซ้€ฃ็ตกใ—ใŸใ‚Šใ€ใคใใใ„ใ‚’่กŒใ„ใพใ™ใ€‚ใพใŸใฏใ€ใƒใƒฉใ‚นใƒกใƒณใƒˆใ‚’็ตŒ้จ“ใ—ใŸไบบใŒใ‚คใƒ™ใƒณใƒˆๆœŸ้–“ไธญๅฎ‰ๅ…จใ‚’ๆ„Ÿใ˜ใ‚‰ใ‚Œใ‚‹ใ‚ˆใ†ใชๆ”ฏๆดใ‚’่กŒใ„ใพใ™ใ€‚\r\n\r\nใ‚คใƒ™ใƒณใƒˆไธปๅ‚ฌ่€…ใพใŸใฏใƒ•ใ‚ฉใƒผใƒฉใƒ ็ฎก็†่€…ใซ้€ฃ็ตกใงใใชใ„ๅ ดๅˆใ‚„็ทŠๆ€ฅใฎๅ ดๅˆใฏใ€่ญฆๅฏŸใซ้›ป่ฉฑใ‚’ใ—ใŸใ‚Šใ€่‡ชๅˆ†่‡ช่บซใง็Šถๆณใ‹ใ‚‰้›ข่„ฑใ—ใฆใใ ใ•ใ„ใ€‚\r\n\r\nไธ‹่จ˜ใฎ้›ปๅญใƒกใƒผใƒซใƒ†ใƒณใƒ—ใƒฌใƒผใƒˆใ‚’ไฝฟ็”จใ—ใ€ใƒใƒฉใ‚นใƒกใƒณใƒˆใซใคใ„ใฆใฎ Code for Kobe ใฎ้€ฃ็ตกๅ…ˆ [[email protected]](mailto:[email protected]) ใซ้€ฃ็ตกใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ ๆ•ฐๅคšใ้–‹ๅ‚ฌใ•ใ‚Œใ‚‹ใ‚คใƒ™ใƒณใƒˆๅ…จใฆใซCode for Kobe ใฎใ‚นใ‚ฟใƒƒใƒ•ใŒๅธธใซๅ‚ๅŠ 
ใ—ใฆใ„ใ‚‹ใจใฏ้™ใ‚‰ใชใ„ใŸใ‚ใ€็‰นๅฎšใฎ็Šถๆณใ‚’่ฉ•ไพกใ™ใ‚‹็ซ‹ๅ ดใซๅธธใซใ„ใ‚‹ใ‚ใ‘ใงใฏใชใ„ใ“ใจใ‚’่ชใ‚ใฆใ„ใพใ™ใ€‚ ใ—ใ‹ใ—ใ€็งใŸใกใฏใ“ใ‚Œใ‚‰ใฎใ‚ฌใ‚คใƒ‰ใƒฉใ‚คใƒณใ‚’ๆไพ›ใ™ใ‚‹ใ“ใจใงใ€ใ“ใ‚Œใ‚‰ใฎไพกๅ€ค่ฆณใ‚’ๅ…ฑๅŒใงๅฎˆใ‚Šใ€ใ™ในใฆใ‚’ๆญ“่ฟŽใ™ใ‚‹็’ฐๅขƒใ‚’ๆไพ›ใงใใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ‚’็ขบ็ซ‹ใ™ใ‚‹ใ“ใจใ‚’ๆœŸๅพ…ใ—ใฆใ„ใพใ™ใ€‚\r\n\r\n็งใŸใกใฏใ‚ใชใŸใฎๅ‚ๅŠ ใ‚’ๅคงๅˆ‡ใซใ—ใฆใŠใ‚Šใ€ใ“ใ‚Œใ‚‰ใฎๆœŸๅพ…ใ‚’ๅบƒ็ฏ„ใซไผใˆใ‚‹ใ“ใจใซใ‚ˆใฃใฆใ€ใ™ในใฆใฎไบบใŒใƒใƒฉใ‚นใƒกใƒณใƒˆใฎใชใ„็’ฐๅขƒใ‚’ๆฅฝใ—ใ‚€ใ“ใจใŒใงใใ‚‹ใ‚ˆใ†ใซ้ก˜ใฃใฆใ„ใพใ™ใ€‚\r\n\r\n## ใ‚ขใƒณใƒใƒใƒฉใ‚นใƒกใƒณใƒˆๅ ฑๅ‘Š็”จ้›ปๅญใƒกใƒผใƒซใƒ†ใƒณใƒ—ใƒฌใƒผใƒˆ\r\n\r\n```\r\n้กŒๅ๏ผšๅฎ‰ๅ…จใซ้–ขใ™ใ‚‹ๅ ฑๅ‘Š[ใ‚คใƒ™ใƒณใƒˆๅ]\r\nๆœฌๆ–‡๏ผšCode for Kobe ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใฎใ‚คใƒ™ใƒณใƒˆใซใŠใ„ใฆใ€ใƒใƒฉใ‚นใƒกใƒณใƒˆใฎไบ‹่ฑกใŒใ‚ใฃใŸใฎใงๅ ฑๅ‘Šใ—ใพใ™ใ€‚๏ผˆๆฐๅใ€ๅ ดๆ‰€ใ€ๆ—ฅไป˜ๅŠใณใ‚คใƒ™ใƒณใƒˆๅ๏ผ‰\r\nๅฎ›ๅ…ˆ๏ผš[email protected]\r\n\r\n```\r\n\r\nไธŠ่จ˜ใฎๆ–‡ๆ›ธใฏใ€[Code for Japan ใฎ่กŒๅ‹•่ฆ็ฏ„](https://github.com/codeforjapan/codeofconduct/blob/master/README.md)ใซๆบ–ๆ‹ ใ—ใฆไฝœๆˆใ—ใฆใ„ใพใ™ใ€‚\r\n" }, { "alpha_fraction": 0.6585648059844971, "alphanum_fraction": 0.7309027910232544, "avg_line_length": 24.121212005615234, "blob_id": "8b0a8707d7d340afe6ea604c76cb2390938e1a99", "content_id": "272454841cceb2638b9d1d9975d9ba204f44a202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2646, "license_type": "no_license", "max_line_length": 122, "num_lines": 66, "path": "/_posts/2016-12-15-meeting23.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: ใ€็ฅ2ๆญณ๏ผใ€‘Code for Kobeๅฎšไพ‹ไผš23rd\r\ndate: 2016-12-15 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n โ‘ ใพใšใฏใ‚ฑใƒผใ‚ญใ‚ซใƒƒใƒˆใ‹ใ‚‰ใฎๆญ“่ซ‡\r\n โ‘กใ€ŒPythonใ‚ฏใƒญใƒผใƒชใƒณใ‚ฐ&ใ‚นใ‚ฏใƒฌใ‚คใƒ”ใƒณใ‚ฐใ€ๅ‡บ็‰ˆ็™บ่กจ๏ผˆๅŠ ่—ค๏ผ‰\r\n 
โ‘ขใ€Œใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2016ใ€ๅฟœๅ‹Ÿ2้€ฑ้–“ๅ‰ๅ ฑๅ‘Š๏ผˆ้ซ˜ๆฉ‹๏ผ‰\r\n โ‘ฃใ€ŒCode for Japan Summit 2016ใ€ๅ ฑๅ‘Š๏ผˆ่ฅฟ่ฐท๏ผ‰\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/293499197715210/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-23rd-meeting--Adp0gLZodPw~mdvoQsIVdvLmAQ-Abt6ErSExreIDKWPy5wWF)\r\n/ Links -\r\n\r\nใŠๅ“ๆ›ธใ\r\n\r\n+ ๏ฝžไนพๆฏ๏ฝž\r\n+ โ‘ ใพใšใฏใ‚ฑใƒผใ‚ญใ‚ซใƒƒใƒˆใ‹ใ‚‰ใฎๆญ“่ซ‡\r\n+ โ‘กใ€ŒPythonใ‚ฏใƒญใƒผใƒชใƒณใ‚ฐ&ใ‚นใ‚ฏใƒฌใ‚คใƒ”ใƒณใ‚ฐใ€ๅ‡บ็‰ˆ็™บ่กจ๏ผˆๅŠ ่—ค๏ผ‰\r\n+ โ‘ขใ€Œใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2016ใ€ๅฟœๅ‹Ÿ2้€ฑ้–“ๅ‰ๅ ฑๅ‘Š๏ผˆ้ซ˜ๆฉ‹๏ผ‰\r\n+ โ‘ฃใ€ŒCode for Japan Summit 2016ใ€ๅ ฑๅ‘Š๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n+ ๏ฝžไบคๆต๏ฝž\r\n\r\nใ€็ฅ๏ผ’ๆญณใ€‘ใ‚ฑใƒผใ‚ญๅ‡บใฆใใพใ—ใŸใ€‚\r\n\r\n# ใ€ŒPythonใ‚ฏใƒญใƒผใƒชใƒณใ‚ฐ&ใ‚นใ‚ฏใƒฌใ‚คใƒ”ใƒณใ‚ฐใ€ๅ‡บ็‰ˆ\r\n\r\nๅŠ ่—คใ•ใ‚“\r\n\r\nๆ›ธ็ฑ [Pythonใ‚ฏใƒญใƒผใƒชใƒณใ‚ฐ&ใ‚นใ‚ฏใƒฌใ‚คใƒ”ใƒณใ‚ฐ](http://scraping-book.com/) ๆ˜Žๆ—ฅ็™บๅฃฒใงใ™ใ€‚\r\n\r\n2017 1/28 ใซใ€Œword ใŒไฝฟใˆใ‚‹ใใ‚‰ใ„ใฎไบบใ€ๅ‘ใ‘ใงใ€ใ‚นใ‚ฏใƒฌใ‚คใƒ”ใƒณใ‚ฐใฎใ‚คใƒ™ใƒณใƒˆใ‚„ใ‚Šใพใ™ใ€‚\r\n\r\n# COG2016 ้€”ไธญ็ตŒ้Ž\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\nใ€Œๅธ‚ๆฐ‘ใ‚‚ๅค‰ใ‚ใ‚‹ใ€่กŒๆ”ฟใ‚‚ๅค‰ใ‚ใ‚‹!! 
ใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นใ€\r\n๏ผˆ[็ฅžๆˆธๅธ‚](http://park.itc.u-tokyo.ac.jp/padit/cog2016/area/kinki.html#kobe-shi),\r\n[ไธ‰็”ฐๅธ‚](http://park.itc.u-tokyo.ac.jp/padit/cog2016/area/kinki.html#sanda-shi)๏ผ‰\r\n\r\ngoogle docs ใงๆ›ธใๅง‹ใ‚ใฆใ„ใพใ™ใ€‚ไฝ•ใ‹ๆ€ใ„ใคใ„ใŸใ‚‰ๆ›ธใ่พผใฟใŠ้ก˜ใ„ใ—ใพใ™ใ€‚\r\n\r\n- [็ฅžๆˆธๅธ‚ ๆธ›็ฝๆ•™่‚ฒ](https://docs.google.com/document/d/1bt5t6iXT0WN-IelpPKRHHCh8_fSvW1tk_XZvXkKe9_4/)\r\n- [ไธ‰็”ฐๅธ‚ ้˜ฒ็ฝ](https://docs.google.com/document/d/1DKpcxOkjfMZWO3Io7jbBdl3wIp3xathrD6zrM7qyJwc/)\r\n- [ไธ‰็”ฐๅธ‚ ใ‚ทใƒ†ใ‚ฃใ‚ปใƒผใƒซใ‚น](https://docs.google.com/document/d/1-b1kNqCP-4piKzgND5zMR5O6nTI1wSIPqOWc-1pZI78/)\r\n\r\n# Code for Japan Summit 2016\r\n\r\nๆฅๅนด็ฅžๆˆธ๏ผŸ\r\n\r\n# ใ‚ณใƒผใƒ—ใ“ใ†ใน\r\nFacebook ใง discussion ใ™ใ‚‹ใ‚ˆใ†ใชไผ็”ปใ‚’็ทดใฃใฆใ„ใ‚‹ใฎใงใ€open ใ—ใŸใ‚‰ๅ‚ๅŠ ใ—ใฆใปใ—ใ„ไปถ\r\n\r\n\r\n# ็ฅžๆˆธๅธ‚ใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผไบคๆต่‚ฒๆˆใƒ—ใƒญใ‚ฐใƒฉใƒ \r\nๆพๆ‘ใ•ใ‚“\r\n\r\n- [Kobe Global Startup Gateway](http://kobe.globalstartupgw.com/) ้–‹ๅง‹\r\n- ็ฅžๆˆธใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใ‚ชใƒ•ใ‚ฃใ‚นใ‚’ๆ‹ ็‚นใซ KOBE Seed Acceleration Program ้–‹ๅง‹\r\n- 500 startups ้€ฃๆบไบ‹ๆฅญใ€Œ[500 KOBE Pre-Accelerator](http://jp.500kobe.com/)ใ€ๅฎŸๆ–ฝ\r\n\r\n\r\n" }, { "alpha_fraction": 0.7063784003257751, "alphanum_fraction": 0.7558919191360474, "avg_line_length": 21.231155395507812, "blob_id": "3a60b75601b452bf8be0e726c2bc2f7124b4628d", "content_id": "ffe5a1464a4e3aa14ac150773284df7201be9e6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9195, "license_type": "no_license", "max_line_length": 144, "num_lines": 199, "path": "/_posts/2018-05-17-meeting39.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš39th\r\ndate: 2018-05-17 19:00:00 +09:00\r\nauthor: ๅ‚ๅŠ ่€…\r\ncomments: true\r\ndescription: |\r\n 
(1)ใ‚ฝใƒผใ‚ทใƒฃใƒซใƒใƒƒใ‚ฏใƒ‡ใƒผใซใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆๆŒใก่พผใ‚“ใงใฟใ‚‹ไปถ(่ฅฟ่ฐท)10ๅˆ†\r\n (2)NASAใƒใƒƒใ‚ซใ‚ฝใƒณ็ฅžๆˆธใงใ‚‚ใ‚„ใ‚Šใพใ™๏ผ(่ฅฟ่ฐท)10ๅˆ†\r\n (3)ๅŠ ๅคๅทๅธ‚ใƒ‡ใƒผใ‚ฟใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ (ๆฐด้‡Ž)15ๅˆ†\r\n (4)ใƒ•ใƒผใƒ‰ใƒ†ใƒƒใ‚ฏใงใ“ใ‚“ใชใฎใ—ใพใ™๏ผ(่Šๆฑ )10ๅˆ†\r\n (5)ไป‹่ญทใ‚ฟใ‚ฏใ‚ทใƒผใƒžใƒƒใƒใƒณใ‚ฐใ‚ขใƒ—ใƒชใฎๅฎŸ่จผๅฎŸ้จ“ใซใคใ„ใฆ(ๅ†…่—ค)15ๅˆ†\r\n (6)Kobe.R(ๆฒณๅŽŸ)5ๅˆ†\r\n (7)ไธ‰็”ฐๅธ‚ใ‚คใƒ™ใƒณใƒˆๅ‘Š็Ÿฅ(้ซ˜ๆฉ‹)5ๅˆ†\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/621570978187303/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/HkQG7R5Rf)\r\n/ Links:\r\n\r\n# Code for Kobe 39th\r\n\r\n<https://www.facebook.com/events/621570978187303/>\r\n\r\nITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ŒCode for Kobeใ€ใฎ็ฌฌ39ๅ›žๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผๅˆใ‚ใพใ—ใฆใฎๆ–นใ‚‚ใฉใชใŸใงใ‚‚ๅ‚ๅŠ ๆญ“่ฟŽใงใ™๏ผ\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n(<http://www.kigyoplaza-hyogo.jp/>)\r\nโ€ป19ๆ™‚ไปฅ้™ๅ…ฅ้คจ็ตŒ่ทฏใŒ่ค‡้›‘ใซใชใ‚Šใพใ™๏ผ้…ๅˆปใ•ใ‚Œใ‚‹ๆ–นใฏใƒกใƒƒใ‚ปใƒผใ‚ธ็ญ‰ใงใŠ็Ÿฅใ‚‰ใ›ใใ ใ•ใ„ใ€‚\r\n\r\n2.ใŠๅ“ๆ›ธใ(ๆ•ฌ็งฐ็•ฅ)\r\n- ใ€œไนพๆฏใƒป่ซธ่ชฌๆ˜Žใ€œ\r\n- (1)ใ‚ฝใƒผใ‚ทใƒฃใƒซใƒใƒƒใ‚ฏใƒ‡ใƒผใซใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆๆŒใก่พผใ‚“ใงใฟใ‚‹ไปถ(่ฅฟ่ฐท)10ๅˆ†\r\n- (2)NASAใƒใƒƒใ‚ซใ‚ฝใƒณ็ฅžๆˆธใงใ‚‚ใ‚„ใ‚Šใพใ™๏ผ(่ฅฟ่ฐท)10ๅˆ†\r\n- (3)ๅŠ ๅคๅทๅธ‚ใƒ‡ใƒผใ‚ฟใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ (ๆฐด้‡Ž)15ๅˆ†\r\n- (4)ใƒ•ใƒผใƒ‰ใƒ†ใƒƒใ‚ฏใงใ“ใ‚“ใชใฎใ—ใพใ™๏ผ(่Šๆฑ )10ๅˆ†\r\n- (5)ไป‹่ญทใ‚ฟใ‚ฏใ‚ทใƒผใƒžใƒƒใƒใƒณใ‚ฐใ‚ขใƒ—ใƒชใฎๅฎŸ่จผๅฎŸ้จ“ใซใคใ„ใฆ(ๅ†…่—ค)15ๅˆ†\r\n- (6)Kobe.R(ๆฒณๅŽŸ)5ๅˆ†\r\n- (7)ไธ‰็”ฐๅธ‚ใ‚คใƒ™ใƒณใƒˆๅ‘Š็Ÿฅ(้ซ˜ๆฉ‹)5ๅˆ†\r\n- โ€ปใใฎไป–็พๅœจไผ็”ปใƒป่ชฟๆ•ดไธญ\r\n- โ€ปใ”ๅธŒๆœ›ใ‚ใ‚Œใฐ้šๆ™‚ๅ—ใ‘ไป˜ใ‘ใพใ™๏ผ\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\n1,000ๅ††(ๅญฆ็”Ÿไปฅไธ‹็„กๆ–™)\r\n\r\nโ˜†Code of Conduct(่กŒๅ‹•่ฆ็ฏ„)\r\nCode for Kobeใงใฏๅ‚ๅŠ ่€…ใฎ่ชฐใ‚‚ใŒๅฟซใๆดปๅ‹•ใงใใ‚‹ใ‚ˆใ†ใ€Code 
for Japanใซๆบ–ๆ‹ ใ™ใ‚‹ๅฝขใง่กŒๅ‹•่ฆ็ฏ„ใ‚’่จญๅฎšใ—ใฆใ„ใพใ™ใ€‚ๅฝ“ใ‚คใƒ™ใƒณใƒˆใซๅ‚ๅŠ ใ„ใŸใ ใๆ–นใซใฏๅŒๆ„ใ„ใŸใ ใๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ใฉใ†ใžไบ‹ๅ‰ใซใŠ็›ฎ้€šใ—ใใ ใ•ใ„ใ€‚\r\n<https://github.com/codeforjapan/codeofconduct/blob/master/README.md>\r\nๅฝ“ใ‚คใƒ™ใƒณใƒˆใงไฝ•ใ‚‰ใ‹ใฎใƒใƒฉใ‚นใƒกใƒณใƒˆใซ้ญ้‡ใ—ใŸๅ ดๅˆใ€Code for KobeใฎFacebookๅ…ฌๅผใƒšใƒผใ‚ธใซใƒกใƒƒใ‚ปใƒผใ‚ธใ„ใŸใ ใใ‹ใ€[email protected]ใพใงใƒกใƒผใƒซใ‚’ใŠ้ก˜ใ„ใ„ใŸใ—ใพใ™ใ€‚\r\n\r\n\r\nไปŠๅ›žใฏใ‚ฐใƒฉใƒ•ใ‚ฃใƒƒใ‚ฏใƒฌใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใ‚‚่กŒใ‚ใ‚Œใฆใ„ใพใ™ใ€‚\r\n\r\n\r\n## ใ‚ฝใƒผใ‚ทใƒฃใƒซใƒใƒƒใ‚ฏใƒ‡ใƒผใซใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆๆŒใก่พผใ‚“ใงใฟใ‚‹ไปถ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\n\"Social Hack Day\"\r\n- <https://hackday.code4japan.org/>\r\n- <https://www.facebook.com/events/463786844040643/>\r\n\r\n2018/6/30 ๅœŸๆ›œๆ—ฅ้–‹ๅ‚ฌใงใ™ใ€‚ๅ ดๆ‰€ใฏใ€Œใ“ใ“๏ผˆใ“ใฎๅฎšไพ‹ใŒ้–‹ๅ‚ฌใ•ใ‚Œใฆใ„ใ‚‹้ƒจๅฑ‹๏ผ‰ใ€ใฎไบˆๅฎšใงใ™ใ€‚\r\n\r\n\r\n## NASAใƒใƒƒใ‚ซใ‚ฝใƒณ็ฅžๆˆธใงใ‚‚ใ‚„ใ‚Šใพใ™๏ผ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ : <https://2017.spaceappschallenge.org/>\r\n\r\nNASA ใŒๅ…ฌ้–‹ใ—ใฆใ„ใ‚‹ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚’ไฝฟใฃใฆใƒใƒƒใ‚ซใ‚ฝใƒณใ‚’ๅฎŸๆ–ฝใ™ใ‚‹ใ€‚\r\nๅ…ˆๆ—ฅใฎ 078 ใงใ‚ญใƒƒใ‚ฏใ‚ชใƒ•็š„ใชใ‚ปใƒƒใ‚ทใƒงใƒณใ‚’้–‹ใใพใ—ใŸใ€‚<https://078kobe.jp/events/4440/>\r\n\r\n\r\nๅ‰ฒใจใƒใƒผใƒ‰็›ฎใชใ‚คใƒ™ใƒณใƒˆใจใฎใ“ใจใ€‚\r\n\r\n\r\n## ๅŠ ๅคๅทๅธ‚ใƒ‡ใƒผใ‚ฟใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ \r\nๆฐด้‡Žใ•ใ‚“๏ผ ๅŠ ๅคๅทๅธ‚ไผ็”ป้ƒจๆƒ…ๅ ฑๆ”ฟ็ญ–่ชฒ\r\n\r\nๅŠ ๅคๅทๅธ‚ 30ๅนดๅบฆใฎไธปใชๆ”ฟ็ญ–\r\n- ่ปขๅ‡บใŒๅคšใ๏ผˆ่ปขๅ‡บ่ถ…ๅ…จๅ›ฝ9ไฝ๏ผ‰่ชฒ้กŒใจ่ช่ญ˜ใ•ใ‚Œใฆใ„ใ‚‹\r\n- ใ‚ทใƒ†ใ‚ฃใƒ—ใƒญใƒขใƒผใ‚ทใƒงใƒณ\r\n- ้˜ฒ็Šฏใ‚ซใƒกใƒฉใฎ่จญ็ฝฎ๏ผ‹BLEใ‚ฟใ‚ฐ๏ผˆ่ฆ‹ๅฎˆใ‚Šใ‚ฟใ‚ฐ๏ผ‰\r\n\r\nๅŠ ๅคๅทๅธ‚ใฎใƒ‡ใƒผใ‚ฟๅˆฉๆดป็”จ\r\n- ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚ซใ‚ฟใƒญใ‚ฐใ‚ตใ‚คใƒˆ๏ผˆๆ˜จๅนด9ๆœˆ้–‹ๅง‹๏ผ‰\r\n- ๆฉŸๆขฐๅˆค่ชญใ—ใ‚„ใ™ใ„ใƒ‡ใƒผใ‚ฟใซ้™ใฃใฆใ„ใ‚‹\r\n - CSV 
ใ‚’ไธญๅฟƒใซใ—ใฆใ„ใ‚‹\r\n\r\n๏ผˆๅ‚่€ƒ๏ผ‰็ทๅ‹™็œICT่ก—ใฅใใ‚Šไผš่ญฐ\r\nๅœฐๅŸŸIOTๅฎŸ่ฃ…ใ‚ฟใ‚นใ‚ฏใƒ•ใ‚ฉใƒผใ‚นใงใ‚นใƒžใƒผใƒˆใ‚ทใƒ†ใ‚ฃๆŽจ้€ฒใ‚’ใ‚„ใฃใฆใ„ใ‚‹\r\nใ€Œใƒ‡ใƒผใ‚ฟๅˆฉๆดป็”จๅž‹ใ‚นใƒžใƒผใƒˆใ‚ทใƒ†ใ‚ฃๆŽจ้€ฒไบ‹ๆฅญใ€ใฎ่ฃœๅŠฉ้‡‘\r\n\r\nใ€Œใƒ‡ใƒผใ‚ฟใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ ใ€ใ‚’ไธญๅฟƒใซไบ‹ๆฅญใ‚’็ต„ใฟ็ซ‹ใฆใŸ\r\n- GISใƒ€ใƒƒใ‚ทใƒฅใƒœใƒผใƒ‰ <https://gis.opendata-api-kakogawa.jp/>\r\n- ๅŠ ๅคๅทใ‚ขใƒ—ใƒช <http://www.city.kakogawa.lg.jp/soshikikarasagasu/kikakubu/jouhouseisakuka/kakogawa_app.html>\r\n\r\nNGSI ใจๅ…ฑ้€š่ชžๅฝ™ๅŸบ็›ค : ๅŠ ๅคๅทๅธ‚ใ€้ซ˜ๆพๅธ‚ใ€็ฆๅฒก็œŒใฎๆฐ‘้–“ไธ€็คพใงๆŽก็”จใŒใ‚ใ‚‹ใ€‚ๆ™ฎๅŠใ™ใ‚‹ใฎใ‹ไธๅฎ‰ใ‚’ๆŒใฃใฆใ„ใ‚‹ใ€‚\r\nๆฐ‘้–“ใ€ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใ‹ใ‚‰ใƒ‡ใƒผใ‚ฟๆไพ›ใŒใ‚ใ‚ŒใฐๆŽฒ่ผ‰ใ—ใŸใ„ใจๆ€ใฃใฆใ„ใ‚‹ใ€‚โ†’ใƒใƒณใ‚บใ‚ชใƒณใŒๅฟ…่ฆใ€‚ๆŠ˜่ง’ใ ใ‹ใ‚‰ใ‚ฝใƒผใ‚ทใƒฃใƒซใƒใƒƒใ‚ฏใƒ‡ใƒผใงใ‚„ใฃใฆใฟใฆใฏใฉใ†ใ‹๏ผŸ\r\n\r\n\r\n## ใƒ•ใƒผใƒ‰ใƒ†ใƒƒใ‚ฏใงใ“ใ‚“ใชใฎใ—ใพใ™๏ผ\r\n่Šๆฑ ใ•ใ‚“๏ผ ๆ ชๅผไผš็คพใƒ•ใƒผใƒ‰ใƒ”ใ‚ฏใƒˆ\r\n\r\nใ€Œ2.4 ไบบใซ 1 ไบบใฏไฝ•ใ‹ใ—ใ‚‰้ฃŸในใ‚‰ใ‚Œใชใ„ใ‚‚ใฎใŒใ‚ใ‚‹ใ€ใจใ„ใ†็Šถๆณใซใ‚ใ‚‹ใ€‚\r\n้ฃŸๆ่กจ็คบใฎ็ขบ่ชใง่จ€่ชžใฎใƒใƒชใ‚ขใŒใ‚ใ‚‹ใฎใ‚’่งฃๆถˆใ—ใŸใ„ใ€‚\r\n\r\nใƒ•ใƒผใƒ‰ใƒ”ใ‚ฏใƒˆใฏๅ…จๅ›ฝ80ๆณ•ไบบ755็ฎ‡ๆ‰€1400็‚นใซๆŽก็”จใ•ใ‚Œใฆใ„ใฃใฆใ„ใ‚‹ใ€‚\r\n\r\nJapan food guide <http://japan-foodguide.jp/>\r\n\r\nไพ‹ใˆใฐใƒ›ใƒ†ใƒซใฎใƒ“ใƒฅใƒƒใƒ•ใ‚งใงใฏ 100-150 ใฎใ†ใก 90 ใใ‚‰ใ„ใŒๆœˆ้–“ๅ…ฅใ‚Œๆ›ฟใ‚ใ‚‹ใ€‚ใƒกใƒ‹ใƒฅใƒผใฎใ‚ซใƒผใƒ‰๏ผˆใƒ•ใƒผใƒ‰ใƒ”ใ‚ฏใƒˆๅ…ฅใ‚Š๏ผ‰ใ‚’ไฝœใ‚‹ใ ใ‘ใงใ‚‚็›ธๅฝ“ใชๆ‰‹้–“ใŒใ‹ใ‹ใฃใฆใ„ใ‚‹ใ€‚ๅŠน็އๅŒ–ใ™ใ‚‹ๆ‰‹ๆณ•ใŒๆฌฒใ—ใ„ใ€‚ๅ‹Ÿ้›†ไธญใ€‚\r\n\r\n็ฝๅฎณๆ™‚้žๅธธ้ฃŸใฎใ‚ขใƒฌใƒซใ‚ฒใƒณ่กจ็คบใ€‚ๅ…ทไฝ“็š„ใซ้ฟ้›ฃๆ‰€ใงไฝฟใˆใ‚‹ใƒ„ใƒผใƒซใจใ—ใฆ้–‹็™บใ—ใฆใ„ใ‚‹ใ€‚้žๅธธ้ฃŸใฎใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใ‚’้–‹ๅ‚ฌใ™ใ‚‹ไบˆๅฎšใ€‚user generated ใƒ‡ใƒผใ‚ฟใƒ™ใƒผใ‚นใ‚’ไฝœใ‚ŠใŸใ„ใ€‚ใ“ใ‚Œใ‚‚ๆ‰‹ๆณ•ใฎใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขๅ‹Ÿ้›†ไธญใ€‚\r\n\r\nFUTURE FOOD TECH New York :<https://futurefoodtechnyc.com/>\r\n- block chain ใง้ฃŸๅ“ใฎใƒˆใƒฌใƒผใ‚ตใƒ“ใƒชใƒ†ใ‚ฃๆƒ…ๅ 
ฑใ‚’ๅ…ฑๆœ‰ใ™ใ‚‹\r\n- ๆ˜†่™ซ้ฃŸ\r\n- 7/7 ใซ็™บ่กจไผš้–‹ๅ‚ฌไบˆๅฎš๏ผš<https://www.facebook.com/events/1670137526416266/>\r\n - Inclusive design\r\n - FOOD Tech\r\n\r\n\r\nQ/A\r\n- ใฉใ†ใ‚„ใฃใฆๆ€ใ„ใคใ„ใŸใ‹\r\n - ๅค–ๅคงใงๅค–ๅ›ฝไบบใฎไบบใจๆŽฅใ—ใฆ\r\n\r\n- ใƒ‡ใƒ•ใ‚กใ‚ฏใƒˆใ‚นใ‚ฟใƒณใƒ€ใƒผใƒ‰ใจใ‚‹ๆ–นๆณ•\r\n - ่กŒๆ”ฟใจ็ต„ใ‚€\r\n - ใƒฉใ‚คใ‚ปใƒณใ‚นๆ–™\r\n - ใ‚ตใƒผใƒ“ใ‚นใซไน—ใฃใ‘ใ‚‹\r\n - ISO็™ป้Œฒ\r\n - ใƒ”ใ‚ฏใƒˆใ‚ฐใƒฉใƒ ่‡ชไฝ“ใฏ ISO 7001 <https://www.iso.org/obp/ui/#search> ใชใฉใซ็™ป้Œฒใ—ใ€ใ‚ฆใ‚งใƒ–ใ‚ตใƒผใƒ“ใ‚นใชใฉใงไบ‹ๆฅญใ‚’ๆˆ็ซ‹ใ•ใ›ใŸใ„\r\n\r\n\r\nใƒžใƒใ‚ฟใ‚คใ‚บ\r\nWebใ‚ตใƒผใƒ“ใ‚น\r\n\r\n- ๆ‘‚ๅ–้‡ใซใคใ„ใฆใฎ่ช็Ÿฅใซใ‚‚ๅฝน็ซ‹ใฆใ‚‰ใ‚Œใ‚‹ใจใ†ใ‚Œใ—ใ„\r\n\r\n- ใƒ‡ใƒผใ‚ฟใฎไฟก้ ผๆ€งใฎๆ‹…ไฟใฎๆ–นๆณ•ใฏ๏ผŸ\r\n - ไปŠใฎๅคงๆ‰‹ไบ‹ๆฅญ่€…ใงๆŽก็”จใ•ใ‚Œใ„ใฆใ„ใ‚‹้–“ใฏใ€ๅ“่ณช็ฎก็†้ƒจ้–€ใงๆ‹…ไฟใ•ใ‚Œใฆใ„ใ‚‹\r\n - ไปŠๅพŒใฎๅฑ•้–‹ใจใ—ใฆใฏใ€ๆคœ็ดขใ‚ตใƒผใƒ“ใ‚นใ‚‚็ต„ใฟ่พผใ‚“ใงใ€็”Ÿ็”ฃ่€…ใซ็ขบ่ชใงใใ‚‹ใ‚ˆใ†ใชใƒ—ใƒญใ‚ปใ‚นใง่€ƒใˆใฆใ„ใ‚‹\r\n\r\n\r\n## ไป‹่ญทใ‚ฟใ‚ฏใ‚ทใƒผใƒžใƒƒใƒใƒณใ‚ฐใ‚ขใƒ—ใƒชใฎๅฎŸ่จผๅฎŸ้จ“ใซใคใ„ใฆ\r\nๅ†…่—คใ•ใ‚“\r\n\r\nUrban Innovation KOBE 2017 ็ฌฌไธ€ๆœŸ\r\n- <http://urban-innovation-kobe.com/project/rescue/>\r\n\r\nๅฎŸ่จผๅฎŸ้จ“\r\n- <https://www.facebook.com/kobestartupnews/posts/963141003850668>\r\n- ๅฎŸ่จผๅฎŸ้จ“ๆœŸ้–“๏ผšๅนณๆˆ30ๅนด5ๆœˆ14ๆ—ฅ(ๆœˆๆ›œ)ใ‹ใ‚‰6ๆœˆ30ๆ—ฅ(ๅœŸๆ›œ)\r\n- ไปŠใ‚„ใฃใฆใพใ™๏ผ\r\n\r\nใ€Œใ•ใฝใฎใ‚‹ใ€ใ‚ขใƒ—ใƒช\r\n- ใ‚ชใƒฌใƒณใ‚ธใŒๅˆฉ็”จ่€…ๅ‘ใ‘ <https://play.google.com/store/apps/details?id=com.simpleappli.saponoru.android>\r\n- ็ท‘ใŒไบ‹ๆฅญ่€…ๅ‘ใ‘ <https://play.google.com/store/apps/details?id=com.simpleappli.saponoru.carrier.android>\r\n - 20 ็คพใปใฉใŒ็™ป้Œฒใ•ใ‚Œใฆใ„ใ‚‹\r\n\r\nใ‚ฑใ‚ขใƒžใƒใƒผใ‚ธใƒฃใƒผใฎไบบใŒ้…่ปŠใฎๆ‰‹้…ใ‚’ใ—ใฆใ„ใ‚‹ๅฎŸๆ…‹ใŒใ‚ใฃใŸใฎใงใ€ใใฎ่ฒ ่ทใ‚’ไธ‹ใ’ใ‚‹ใ“ใจใ‚’็ฌฌไธ€ใฎ็›ฎๆจ™ใจใ—ใฆไฝœๆˆใ—ใŸใ€‚้›ป่ฉฑใฎๆ‰‹้–“ใจไพกๆ 
ผใฎ่ชฟๆŸปใŒๅคงใใชๆ‰‹้–“ใซใชใฃใฆใ„ใ‚‹ใ€‚ๆ‰‹้…ใฎ้š›ใซใ€Œใ‚ชใƒ—ใ‚ทใƒงใƒณ๏ผˆๆกไปถ๏ผ‰ใ€ใ‚’ๆŒ‡ๅฎšใงใใ‚‹ใฎใŒ็‰นๅพดใ€‚ไพ‹ใˆใฐใ€Œ้šŽๆฎตใฎไป‹ๅŠฉใ€ใ€Œ่ปŠใ„ใ™ใงไน—ใ‚Š่พผใ‚ใ‚‹ใ€ใ€Œ่ปŠใ„ใ™ใŒๅ€Ÿใ‚Šใ‚‰ใ‚Œใ‚‹ใ€ใจใ„ใฃใŸๆกไปถใ€‚\r\n\r\nๅˆฉ็”จ่€…ใŒไบˆ็ด„ไปฎ็™ป้Œฒใ—ใ€ไบ‹ๆฅญ่€…ใŒไบˆ็ด„ๆ‰ฟ่ชใ™ใ‚‹ใ€ใจใ„ใ†ใƒ•ใƒญใƒผใซใชใฃใฆใ„ใ‚‹ใ€‚\r\n\r\n#7119 ็ฅžๆˆธๅธ‚ใงใ‚„ใฃใฆใ‚‹้›ป่ฉฑ็›ธ่ซ‡(7ๅ‰ฒใŒ็ทŠๆ€ฅๅบฆๅˆใ„ไฝŽใ„)\r\n็—…้™ขใซ่กŒใใŸใ‚ใฎๆ–ฐใ—ใ„้ธๆŠž่‚ขใจใ—ใฆใ€่บซ่ฟ‘ใซๆไพ›ใงใใ‚Œใฐใจ่€ƒใˆใฆใ„ใ‚‹ใ€‚\r\n\r\nใŸใ ใ—ใ€ใ ใ‹ใ‚‰ใจ่จ€ใฃใฆ119ใ—ใฆใ„ใ‘ใชใ„ใจใ„ใ†ใ“ใจใฏไธ€ๅˆ‡ใชใใ€ไธๅฎ‰ใŒใ‚ใ‚Š็—‡็ŠถใŒใ‚ใ‚Œใฐๅฟ…ใš119ใ—ใฆๆฌฒใ—ใ„ใ€‚ๆถˆ้˜ฒ้šŠๅ“กใฏใฟใชๆฏŽๆ—ฅๆ‡ธๅ‘ฝใซๅ–ใ‚Š็ต„ใ‚“ใงใ„ใพใ™๏ผ\r\n\r\n## Kobe.R\r\nๆฒณๅŽŸใ•ใ‚“\r\n\r\nใƒ‡ใƒผใ‚ฟๅˆ†ๆžๅ‹‰ๅผทไผšใ‚’้–‹ๅ‚ฌใ—ใฆใ„ใพใ™ใ€‚ๆœ€่ฟ‘ๅพฉๆดปใ—ใพใ—ใŸใ€‚\r\n<https://kobexr.doorkeeper.jp/>\r\n\r\nไพ‹ใˆใฐๅ‰ๅ›ž\r\n- ๅญฆ้š›็š„ๅˆ†้‡Žใฎใ‚นใƒขใƒผใƒซใƒ‡ใƒผใ‚ฟใ‚’ไฝฟใฃใŸๅˆ†ๆžไบ‹ไพ‹\r\n- R ใ‹ใ‚‰ TensorFlow ใ‚’ไฝฟใฃใฆใฟใ‚‹\r\n- PySpark\r\n- Data Science and Engineering in Canada and US\r\n- ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚’ๆดป็”จใ—ใŸใ‚ขใƒ—ใƒช้–‹็™บ\r\n- ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\n\r\nใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใซ้–ขใ—ใฆใฏใ€ใ€Œๅ˜ใชใ‚‹ๅฏ่ฆ–ๅŒ–ใ€ใฎใใฎๅ…ˆใฎๆทฑใ„ๅˆ†ๆžใŒใงใใŸใ‚‰ใ„ใ„ใชใจๆ€ใฃใฆใ„ใ‚‹ใ€‚\r\n\r\n\r\n## ไธ‰็”ฐๅธ‚ใ‚คใƒ™ใƒณใƒˆๅ‘Š็Ÿฅ\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\nใ€Œใ‚นใƒผใƒ‘ใƒผๅ…ฌๅ‹™ๅ“กใ€ใฎใ‹ใŸใ‚’ใŠๆ‹›ใใ—ใฆใ‚คใƒ™ใƒณใƒˆ้–‹ๅ‚ฌไบˆๅฎšใ—ใฆใ„ใพใ™ใ€‚\r\n\r\nใ€Œใกใชใฃใกใ‚ƒdoใ‹ใ‚‰ๅง‹ใ‚ใ‚‹ๅœฐๅŸŸๆดปๆ€งๅŒ–ใ€6/23 ใซไธ‰็”ฐๅธ‚ใพใกใฅใใ‚Šๅ”ๅƒใ‚ปใƒณใ‚ฟใƒผๅคš็›ฎ็š„ใƒ›ใƒผใƒซใซใฆ้–‹ๅ‚ฌไบˆๅฎšใ€‚\r\n\r\n" }, { "alpha_fraction": 0.7120662927627563, "alphanum_fraction": 0.7586742639541626, "avg_line_length": 21.814815521240234, "blob_id": "d42ff1bb5e90c1431e531d2caddf9b58c6063ae3", "content_id": "19d17be6aebb80ebbf68eac1489585c4f289eacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 3955, "license_type": "no_license", "max_line_length": 122, "num_lines": 81, "path": "/_posts/2016-09-15-meeting20.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: ็ฌฌ20ๅ›žCode for Kobeๅฎšไพ‹ไผš\r\ndate: 2016-09-15 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n ARๅทจไบบๅฐ†ๆฃ‹๏ผˆๅ–œๅคš๏ผ‰\r\n ใ‚ณใƒผใƒ—ใ“ใ†ในใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณๅ ฑๅ‘Š๏ผˆๅœ“ไบ•๏ผ‰\r\n ใƒ‰ใ‚ณใƒข่ฆ‹ๅฎˆใ‚Šไบ‹ๆฅญ๏ผˆๅฑฑๆœฌ๏ผ‰\r\n ใ‚ขใ‚ฏใƒ†ใ‚ฃใƒ–ใ‚จใ‚คใ‚ธใƒณใ‚ฐใ‚’ITใƒปไบบๅทฅ็Ÿฅ่ƒฝใซใ‚ˆใ‚Šๆ”ฏๆดๅผทๅŒ–๏ผˆ็€ง๏ผ‰\r\n WORLD DATA VIZ CHALLENGE 2nd Stageๅ‚ๅŠ ่€…ๅ‹Ÿ้›†๏ผˆๅทไบ•๏ผ‰\r\n ใใฎไป–ใ”ๅธŒๆœ›ใ‚ใ‚Œใฐๅ—ใ‘ไป˜ใ‘ใพใ™๏ผ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1081678858534191/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-20th-meeting--AdokohnAjV6ghglFbzw5sP~IAQ-0pwVOFL3UemIrbFghRPPq)\r\n/ Links -\r\n\r\nagenda\r\n\r\n- ARๅทจไบบๅฐ†ๆฃ‹๏ผˆๅ–œๅคš๏ผ‰\r\n- ใ‚ณใƒผใƒ—ใ“ใ†ในใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณๅ ฑๅ‘Š๏ผˆๅœ“ไบ•๏ผ‰\r\n- ใƒ‰ใ‚ณใƒข่ฆ‹ๅฎˆใ‚Šไบ‹ๆฅญ๏ผˆๅฑฑๆœฌ๏ผ‰\r\n- ใ‚ขใ‚ฏใƒ†ใ‚ฃใƒ–ใ‚จใ‚คใ‚ธใƒณใ‚ฐใ‚’ITใƒปไบบๅทฅ็Ÿฅ่ƒฝใซใ‚ˆใ‚Šๆ”ฏๆดๅผทๅŒ–๏ผˆ็€ง๏ผ‰\r\n- WORLD DATA VIZ CHALLENGE 2nd Stageๅ‚ๅŠ ่€…ๅ‹Ÿ้›†๏ผˆๅทไบ•๏ผ‰\r\n\r\n# ARๅทจไบบๅฐ†ๆฃ‹\r\nๅ–œๅคšใ•ใ‚“๏ผˆ็ฅžๆˆธๅคงๅญฆ๏ผ‰\r\n\r\nJST RISTEX ใงใ€‚ๅทจๅคงใชๅฐ†ๆฃ‹็›คใฎไธŠใง่ฉฐๅฐ†ๆฃ‹ใ‚’ใ™ใ‚‹ใ€‚\r\n่ฆ–่ฆš้šœใŒใ„่€…ใฎใŸใ‚ใฎๆ”ฏๆดใ‚ทใ‚นใƒ†ใƒ \r\n\r\nๅบŠ๏ผˆ90x90cm ใฎไธญๅฟƒ 30x30cm ใซใ‚ปใƒณใ‚ตใƒผใง 5x5 ใฎใƒžใ‚น๏ผ‰ใจ้ดใซใ‚ปใƒณใ‚ตใƒผใ€‚้ŸณๅฃฐใจๆŒฏๅ‹•ใงๆกˆๅ†…ใ™ใ‚‹ใ€‚\r\n่ก—ไธญใ‚’ๆญฉใ„ใฆใ„ใ‚‹็ตŒ้จ“ใŒใ‚ใ‚‹ไบบใปใฉไธŠๆ‰‹ใใงใใŸใ€‚\r\n\r\nๅคš้ขๆŒ‡ใ—ใซๆฅใฆใ„ใŸใ ใ„ใฆใ‚„ใฃใŸใจใใฏใ€ๅฃฐใซๅ‡บใ•ใชใ„ใจใ„ใ‘ใชใ„ใฎใŒๅคงๅค‰ใ ใฃใŸใใ†ใ€‚\r\n้‡‘ใจ้Š€ใŒ่žใๅ–ใ‚Šใซใใ„ใฎใงใ€gold, silver ใง็™บ็”Ÿใ—ใฆใ„ใ‚‹ใ“ใจใ‚‚ๅคšใ„ใ€‚\r\n\r\n็Ÿณๅทๆตฉๅ…ˆ็”ŸใŒไธญๅฟƒใซใชใฃใฆๆŽจ้€ฒใ•ใ‚Œใฆใ„ใ‚‹ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ 
http://www8.cao.go.jp/shougai/suishin/seisaku_iinkai/\r\nใ€Œๅคšไธ–ไปฃๅ…ฑๅ‰ตใซใ‚ˆใ‚‹่ฆ–่ฆš้šœใŒใ„่€…็งปๅ‹•ๆ”ฏๆดใ‚ทใ‚นใƒ†ใƒ ใ€\r\n\r\nใ“ใ‚Œใพใงใฏใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใซๆฒฟใฃใฆไผๆฅญใ‚’ไฝœใฃใŸใ‚Šใ—ใฆใใŸใ‘ใ‚Œใฉใ€ๆ™ฎๅŠใ•ใ›ใ‚‹ใฎใŒ้›ฃใ—ใ„ใ€‚ๆ–นๅ‘่ปขๆ›ใ—ใฆใ€ใ‚ณใƒขใƒ‡ใ‚ฃใƒ†ใ‚ฃๅŒ–ใ•ใ‚ŒใŸๆŠ€่ก“ใ‚’ไฝฟใฃใฆๅฎŸ่ฃ…ใ™ใ‚‹ๆ–น้‡ใงใ™ใ™ใ‚ใ‚‹ใ‚ˆใ†ใซใชใฃใŸใ€‚\r\nใƒ‘ใƒฉใƒชใƒณใƒ”ใƒƒใ‚ฏใซๅ‘ใ‘ใฆใ€ใƒŠใƒ“ใ‚ฒใƒผใ‚ทใƒงใƒณใฎๅฎŸ้จ“ใจใ—ใฆ AR ๅทจไบบๅฐ†ๆฃ‹ใ‚’ใ‚„ใฃใŸใ€‚\r\n\r\n[ๆต…ๅทๆ™บๆตๅญ](https://ja.wikipedia.org/wiki/%E6%B5%85%E5%B7%9D%E6%99%BA%E6%81%B5%E5%AD%90)ใ•ใ‚“ (IBM) ใ‚‚ใ“ใฎๅˆ†้‡Žใฏ่ฉณใ—ใ„ใ€‚\r\n\r\n้–ข้€ฃ็ ”็ฉถใจใ—ใฆใฏใ€ๆฒกๅ…ฅๅž‹ใฎใƒ—ใƒญใ‚ธใ‚งใ‚ฏใ‚ทใƒงใƒณ็ฎฑใ‚’ไฝฟใฃใฆใ€่ฆ–่ฆš้šœๅฎณ๏ผˆ็‹ญ่ฆ–้‡Ž๏ผ‰ใ‚„ๆญฉใใ‚นใƒžใƒ›ๆ™‚ใฎ\r\n\r\n็ฅžๆˆธใ‚ขใ‚คใ‚ปใƒณใ‚ฟใƒผใง็ ”็ฉถใงใใใ†ใ€‚ (ๅ›ฝๅฎถๆˆฆ็•ฅ็‰นๅŒบๆๆกˆ๏ผ‰\r\n\r\n\r\n# ใ‚ณใƒผใƒ—ใ“ใ†ในใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณๅ ฑๅ‘Š\r\nๅœ“ไบ•ใ•ใ‚“๏ผˆใ‚ณใƒผใƒ—ใ“ใ†ใน๏ผ‰\r\n\r\n6 ใ‚ฐใƒซใƒผใƒ— 27 ๅๅ‚ๅŠ ใ€‚ๅ‹Ÿ้›†ใฏ facebook ใจใ‚ณใƒผใƒ—ใ“ใ†ในใฎใƒšใƒผใ‚ธใฎ 2 ็ฎ‡ๆ‰€ใงใ€‚ใปใจใ‚“ใฉใ™ในใฆ facebook ใ‹ใ‚‰ใ€‚\r\nใ‚ณใƒผใƒ—ใ“ใ†ในไผšๅ“ก168ไธ‡ไบบ\r\n\r\n- ใƒ•ใ‚ฉใƒˆใ‚ฆใ‚งใƒ‡ใ‚ฃใƒณใ‚ฐๆต่กŒใฃใฆใ‚‹ใ‘ใฉใ€ๅฎ‰ใ‹ใ‚ใ†ๆ‚ชใ‹ใ‚ใ†ใชใฎใ‚’ไฝ•ใจใ‹ใ™ใ‚‹ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ข\r\n- ๅฅๅบทใ‚ฝใƒ ใƒชใ‚จ / NPO ๆณ•ไบบใ‚†ใ„\r\n- ใƒใƒƒใƒˆใ‚’ๆดป็”จใ—ใŸ่พฒๆฅญ / COPLI U-35\r\n- ้…้€ๆ‹…ๅฝ“ x ใƒˆใƒฉใƒƒใ‚ฏ C to C ใฎใƒžใƒƒใƒใƒณใ‚ฐ / ไบบ้–“ไธญๅฟƒ่จญ่จˆๆŽจ้€ฒๆฉŸๆง‹\r\n - ๆ‹…ๅฝ“ 1000 ไบบใใ‚‰ใ„ใซใคใใ€้…้€ๅ…ˆ 400 ไบบใใ‚‰ใ„\r\n- ๆ„›ใฎใŠๆŽƒ้™คใ‚ตใƒผใƒ“ใ‚น / NTTใ‚นใƒžใƒผใƒˆใ‚ณใƒใ‚ฏใƒˆใฎใ‹ใŸใŒใŸ\r\n\r\nใ€Œใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ใ‚ใ‚‹ใ„ใฏใ€Œ็ต„ๅˆๅ“กๅŒๅฃซใฎใƒžใƒƒใƒใƒณใ‚ฐใ€ใซๅคงๅˆฅใ•ใ‚Œใใ†ใ€‚่ชฒ้กŒ่จญๅฎšใฎๅ•้กŒใ‚‚ใ‚ใ‚‹ใ‘ใ‚Œใฉใ€ใ€ŒIT ใ‚’ไฝฟใฃใŸ๏ฝžใ€ใจใ„ใ†ใƒใ‚คใƒณใƒˆใŒๅผฑใ‹ใฃใŸใ‹ใ‚‚ใ€‚\r\n\r\n# ใƒ‰ใ‚ณใƒข่ฆ‹ๅฎˆใ‚Šไบ‹ๆฅญ\r\nๅฑฑๆœฌใ•ใ‚“๏ผˆ็ฅžๆˆธๅธ‚๏ผ‰\r\n\r\nNTT DoCoMo ใจ 4 ๆœˆใ‹ใ‚‰ไบ‹ๆฅญ้€ฃๆบๅ”ๅฎšใง้€ฒใ‚ใฆใใŸใ€‚\r\n่ฅฟ็˜ๅฐๅญฆๆ กใจๅฎฎๆœฌๅฐๅญฆๆ 
กใง่ฉฆ้จ“้–‹ๅง‹๏ผˆๅฎŸ่จผๅฎŸ้จ“๏ผ‰ใ€‚2ๅ‰ฒใใ‚‰ใ„ใฎใ‹ใŸใŒๅ‚ๅŠ ใ€‚\r\n\r\nๅ‹•็š„ๆคœ็Ÿฅ๏ผˆใ‚นใƒžใƒ›ๅฐ‚็”จใ‚ขใƒ—ใƒช๏ผ‰ใจ้™็š„ๆคœ็Ÿฅ๏ผˆใ‚ปใƒณใ‚ตใƒผใ‚’้ง…ใชใฉใซ่จญ็ฝฎ๏ผ‰\r\nGooglePlayใงใ€Œ่ฆ‹ๅฎˆใ‚Šๅฟœๆด้šŠใ€ใ‚’ๆคœ็ดขใ—ใฆๆ˜ฏ้žใ‚คใƒณใ‚นใƒˆใƒผใƒซใ‚’\r\n\r\n# ใ‚ขใ‚ฏใƒ†ใ‚ฃใƒ–ใ‚จใ‚คใ‚ธใƒณใ‚ฐใ‚’ITใƒปไบบๅทฅ็Ÿฅ่ƒฝใซใ‚ˆใ‚Šๆ”ฏๆดๅผทๅŒ–\r\n็€งใ•ใ‚“\r\n\r\n" }, { "alpha_fraction": 0.7003327012062073, "alphanum_fraction": 0.740731954574585, "avg_line_length": 18.925373077392578, "blob_id": "cc6af0e196e855de78a3423c855142880a0278f9", "content_id": "6a28fe3be7c091096b972ed3a764cf8b7d2c8458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8841, "license_type": "no_license", "max_line_length": 122, "num_lines": 201, "path": "/_posts/2017-06-15-meeting29.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš29th\r\ndate: 2017-06-15 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (0)ๆ–ฐไฝ“ๅˆถ็™บ่กจ๏ผ(่ฅฟ่ฐท)\r\n (1)ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ร—ใ‚ณใƒผใƒ—ใ“ใ†ใน(ๅœ“ไบ•)\r\n (2)ๅฒกๆœฌๅ•†ๅบ—่ก—ใƒ–ใƒฉใƒณใƒ‰ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(ๅ–œๅคš)\r\n (3)ไธ‰็”ฐๅธ‚ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(้ซ˜ๆฉ‹)\r\n (4)้ ˆ็ฃจๆตทๅฒธใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(ๆœจๆˆธ)\r\n (5)Code for Japan Summit 2017 ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/299471347130311/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-18th-meeting--AdqBzang90eN3w5UwmLtvXURAQ-K9yESFpFkJUkK3IATnD2o)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=2861)\r\n\r\n1.ๅ ดๆ‰€\r\n[ใ‚นใƒšใƒผใ‚นใ‚ขใƒซใƒ•ใ‚กไธ‰ๅฎฎ](http://www.spacealpha.jp/sannomiya/access.html)\r\n\r\n2.ใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (0)ๆ–ฐไฝ“ๅˆถ็™บ่กจ๏ผ(่ฅฟ่ฐท)\r\n- (1)ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ร—ใ‚ณใƒผใƒ—ใ“ใ†ใน(ๅœ“ไบ•)\r\n- 
(2)ๅฒกๆœฌๅ•†ๅบ—่ก—ใƒ–ใƒฉใƒณใƒ‰ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(ๅ–œๅคš)\r\n- (3)ไธ‰็”ฐๅธ‚ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(้ซ˜ๆฉ‹)\r\n- (4)้ ˆ็ฃจๆตทๅฒธใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ(ๆœจๆˆธ)\r\n- (5)Code for Japan Summit 2017 ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\nใŠใฒใจใ‚Šๆง˜1,000ๅ††๏ผˆๅญฆ็”Ÿไปฅไธ‹็„กๆ–™๏ผ‰\r\n\r\n\r\n# ๆ–ฐไฝ“ๅˆถ็™บ่กจ\r\n\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nๅพŒ่—คใ•ใ‚“ๅ‰ฏไปฃ่กจใ‚นใ‚ซใ‚ฆใƒˆใ—ใพใ—ใŸใ€‚่ถŠๆ™บใ•ใ‚“ใŠ็–ฒใ‚Œใ•ใพใงใ—ใŸใ€‚\r\n\r\nfacebook page ใจใฏๅˆฅใซ code for kobe connect ใจใ„ใ†ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ็”จใฎใ‚ฐใƒซใƒผใƒ—ใŒใ‚ใ‚Šใพใ™ใ€‚ใ“ใกใ‚‰ใฏๅฎšไพ‹ๅ‚ๅŠ ใ—ใŸใ‹ใŸใซๆ‹›ๅพ…ใ‚’้€ใฃใฆใ„ใพใ™ใ€‚\r\ngithub pages ใฏ hackpad ใงๆ›ธใ„ใŸใƒญใ‚ฐใ‚’ใ‚ตใƒผใƒใ‚จใƒณใ‚ธใƒณใซๆ‹พใฃใฆใ‚‚ใ‚‰ใ†ใŸใ‚ใซใƒกใƒณใƒ†ใƒŠใƒณใ‚นใ—ใฆใ„ใพใ™ใ€‚\r\n\r\n\r\n# ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ร—ใ‚ณใƒผใƒ—ใ“ใ†ใน\r\n\r\nๅœ“ไบ•ใ•ใ‚“๏ผˆใ‚ณใƒผใƒ—ใ“ใ†ใน๏ผ‰\r\n\r\n[creww](https://creww.me/ja) ็คพใจใ‚ณใƒฉใƒœใƒฌใƒผใ‚ทใƒงใƒณใ—ใฆใ€ใ‚ทใƒŠใ‚ธใƒผใ‚’ๅพ—ใ‚ˆใ†ใจใ—ใฆใ„ใ‚‹ใ€‚\r\n6 ็คพๅˆๅŒใงใƒ—ใƒญใ‚ฐใƒฉใƒ ใซๅ‚ๅŠ ใ€‚\r\n\r\nใ€Œๅ…ˆ้ง†็š„ใชใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใƒปๆŠ€่ก“ใฎ็ฒๅพ—ใ€\r\nใ€Œ่ทๅ“กใฎใ‚คใƒŽใƒ™ใƒผใ‚ทใƒงใƒณ่งฆ็™บใ€\r\n\r\nๅบ—่ˆ—ใจๅ€‹้…ใ€็ฆ็ฅ‰ใ‚’ใƒ•ใƒƒใ‚ฏใซใ—ใฆใ€58 ็คพ่ฆ‹่พผใฟใง้–‹ๅง‹ใ€‚็ตžใ‚Š่พผใ‚€ๅฟ…่ฆใŒใ‚ใฃใŸใฎใงใ€6 ็คพใ€‚\r\n\r\n- IoT x ๅฎ…ๅ†…ใ‚ปใƒณใ‚ตใƒผใซใ‚ˆใ‚‹็”Ÿๆดปๆ”ฏๆดใ‚ทใ‚นใƒ†ใƒ \r\n- ่ชฐใงใ‚‚่ฌ›ๅบง้–‹ๅ‚ฌใงใใ‚‹ใ‚นใ‚ญใƒซใƒปใƒžใƒƒใƒใƒณใ‚ฐ\r\n- ็ช“ใ‚„ใƒ‰ใ‚ขใซ่ฒผใ‚‹ใ ใ‘ใฎใƒ›ใƒผใƒ ใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃ\r\n- ใƒ“ใƒƒใ‚ฐใƒ‡ใƒผใ‚ฟใฎ้ซ˜้€Ÿๅ‡ฆ็†ใ‚ขใƒ—ใƒชใ‚ฑใƒผใ‚ทใƒงใƒณ\r\n- ๅญ่‚ฒใฆๅฑคใฎใƒ‹ใƒผใ‚บๅŽ้›†ใƒปๅ•†ๅ“้–‹็™บ\r\n- ่บซใฎๅ›žใ‚Šใฎใ‚คใƒ™ใƒณใƒˆๆƒ…ๅ ฑใ‚’ๅŽ้›†ใƒปๆคœ็ดขใ‚ขใƒ—ใƒช\r\n\r\nใ‚ชใƒณใƒฉใ‚คใƒณไธŠใงใƒ–ใƒฉใƒƒใ‚ทใƒฅใ‚ขใƒƒใƒ—ใ€‚ๅ…จใฆใƒใƒฃใƒƒใƒˆใงใ‚„ใ‚Šๅ–ใ‚Šใ‚’ใ™ใ‚‹ใ€‚\r\n\r\nใ•ใ‚‰ใซ็ตžใ‚Š่พผใ‚“ใง 2 ็คพใ€‚ใ“ใ“ใงๅˆใ‚ใฆใ‚ชใƒ•ไผšใ€‚\r\ncrewwใฏใƒใƒฃใƒƒใƒˆใ‚’่ฆ‹ใฆใ„ใ‚‹\r\nใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใ‚’ๅฎˆใ‚‹ๅฝน็›ฎใ‚‚ใ‚ใ‚‹\r\n\r\n## 
ใ€ŒใŸใ‚ใพใฃใทใ€ๅœฐๅŸŸใ‚คใƒ™ใƒณใƒˆๆƒ…ๅ ฑใŒๅ…ฑๆœ‰ใงใใ‚‹ใ‚นใƒžใƒ›ใ‚ขใƒ—ใƒช\r\n\r\n<http://www.tamemap.net/>\r\n\r\n- ๆŽฒ็คบๆฟใ‚’ๆ’ฎๅฝฑใ—ใฆๆƒ…ๅ ฑใ‚’้›†ใ‚ใ‚‹๏ผˆๆŠ•็จฟๆ™‚ใซๅ ดๆ‰€ใจๆ—ฅๆ™‚ใ ใ‘ๅ…ฅๅŠ›๏ผ‰\r\n- ๏ผˆใ‚นใƒžใƒ›ๆ•™ๅฎค๏ผ‰๏ผšๅฎŸไฝ“ใจใ—ใฆใฏ้ซ˜้ฝข่€…ใฎใ‹ใŸใŒๆดป่บใ•ใ‚Œใฆใ„ใ‚‹ใ€‚\r\n- ใ‚นใƒžใƒ›ใƒปใ‚ขใƒ—ใƒชใŸใ‚ใพใฃใท\r\n- ใƒ‡ใ‚ธใ‚ฟใƒซๆƒ…ๅ ฑใจใ—ใฆๆ‰ฑใˆใ‚‹ใ‚ˆใ†ใซใชใ‚‹\r\n- ๏ผˆใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใ‚‚๏ผ‰๏ผšๅญฆๆ กใ‹ใ‚‰ใ‚‚ๅผ•ใๅˆใ„ใŒใ‚ใ‚‹\r\n\r\n## SIMOUNT\r\n\r\n<http://simount.com/>\r\n\r\n- ้ซ˜้€Ÿ\r\n- ่‡ชๅ‹•ใƒ‡ใƒผใ‚ฟ็ตๅˆ\r\n- ่‡ชๅ‹•้›†่จˆ\r\n\r\n2018-03 ใใ‚‰ใ„ใพใงๅฎŸ่จผๅฎŸ้จ“ๅฎŸๆ–ฝใ—ใพใ™๏ผ\r\n\r\n\r\n# ๅฒกๆœฌๅ•†ๅบ—่ก—ใƒ–ใƒฉใ‚คใƒณใƒ‰ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ\r\n\r\nๅ–œๅคšๅ…ˆ็”Ÿ\r\n\r\n๏ผ–ๆœˆ๏ผ‘๏ผ˜ๆ—ฅ๏ผ ๅฒกๆœฌๅ•†ๅบ—่ก—ใงใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃโ€•ๅฎŸๆ–ฝใ—ใพใ™ใ€‚\r\n\r\n[ใ€Œๅคšไธ–ไปฃๅ…ฑๅ‰ตใซใ‚ˆใ‚‹็Ÿฅ่ฆš้šœใŒใ„่€…็งปๅ‹•ๆ”ฏๆดใ‚ทใ‚นใƒ†ใƒ ใฎ้–‹็™บใ€](https://www.ristex.jp/examin/i-gene/project_26.html)\r\nๅ›ฝใƒ—ใƒญ\r\n(2014-11 -- 2017-11)\r\n\r\n- ใ€็›ฎ็š„ใ€‘่ฆ‹ใˆใชใ„ไบบใ‚„่ฆ‹ใˆใฅใ‚‰ใ„ไบบใซ่ก—ใชใ‹ใ‚’่‡ช็”ฑใซๆญฉใ„ใฆใ‚‚ใ‚‰ใŠใ†\r\n- ใ€ๆ–น้‡ใ€‘้Ÿณๅฃฐใ‚ฌใ‚คใƒ€ใƒณใ‚นใจ่•ๅœฐๅ›ณใ‚’ๆฐ‘็”ŸๆฉŸๅ™จใงๅฎŸ่ฃ…\r\n- ใ€ๆ–นๆณ•ใ€‘GPS๏ผˆๆบ–ๅคฉ้ ‚๏ผ‰BLEใƒ“ใƒผใ‚ณใƒณใ€PDR ใซใ‚ˆใ‚‹ๆธฌไฝใ€็”ปๅƒๅ‡ฆ็†ใ€ๆญฉ่กŒ็ตŒ่ทฏ้›†็ฉ\r\n\r\nๅˆฅใฎไบ‹ไพ‹๏ผš[ๆธ…ๆฐดๅปบ่จญ](http://www.shimz.co.jp/news_release/2017/2016046.html)ใƒปๆ—ฅๆœฌIBMใƒปไธ‰ไบ•ไธๅ‹•็”ฃ ๏ผ ๆ—ฅๆœฌๆฉ‹ใ‚ณใƒฌใƒ‰\r\n๏ผˆๅ‚่€ƒ๏ผš[ITใƒกใƒ‡ใ‚ฃใ‚ข่จ˜ไบ‹](http://www.itmedia.co.jp/enterprise/articles/1701/27/news034.html)๏ผ‰\r\n\r\n็งปๅ‹•ใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃๆƒ…ๅ ฑๅ…็ซฅๅ–้›†ๆŠ€่ก“\r\n\r\n้˜ชๆ€ฅใ€Œๅฒกๆœฌใ€ใจJRใ€Œๆ‘‚ๆดฅๆœฌๅฑฑใ€้–“ใ‚’้‡็‚น็š„ใซ่ฉฆ้จ“ใ€‚\r\nใ‚ขใƒผใ‚ฑใƒผใƒ‰ใŒ็„กใใฆ GPS ใŒใจใ‚Šใ‚„ใ™ใ‹ใฃใŸใ€‚\r\nๅ•†ๅบ—่ก—่‡ชไฝ“ใ‚‚ๅ…ƒๆฐ—ใงๅ”ๅŠ›็š„ใ€‚\r\n\r\nใ€Œๆญฉ่กŒ็ฉบ้–“ใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใ€๏ผˆๅ›ฝๅœŸไบค้€š็œใฎใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ๏ผ‰ใงไฝœๆˆใ•ใ‚Œใฆใ„ใ‚‹ใƒ‡ใƒผใ‚ฟใƒ™ใƒผใ‚นใŒใ‚ใ‚‹ใ€‚\r\n\r\n- ๅŸบๅ›ณ\r\n- POI / POR ใฎไป˜ๅŠ ๆƒ…ๅ ฑ\r\n- 
ใ‚จใƒƒใ‚ธใจใƒŽใƒผใƒ‰ใงใƒใƒƒใƒˆใƒฏใƒผใ‚ฏๆง‹็ฏ‰\r\n- ใƒซใƒผใƒˆ\r\n\r\nใจใ“ใ‚ใŒใ€่ฆ–่ฆš้šœใŒใ„่€…ใฎใ‹ใŸใซๅ‘ใ‘ใฆใฏใ€ใ‚ณใƒณใƒ†ใƒณใƒ„๏ผˆใƒ‡ใƒผใ‚ฟ๏ผ‰ใฎๆ็คบๆ–นๆณ•ใŒใƒใƒƒใ‚ฏใ€‚่ปŠใ„ใ™ใฎๅ ดๅˆใจใฏๅ•้กŒใซใชใ‚‹้ƒจๅˆ†ใŒ็•ฐใชใ‚‹ใ€‚\r\n\r\n- ่€ณใ‚’้˜ฒใŒใชใ„้Ÿณๅฃฐใ‚ฌใ‚คใƒ€ใƒณใ‚น๏ผˆ้ชจไผๅฐŽใƒ˜ใƒƒใƒ‰ใ‚ปใƒƒใƒˆ๏ผ‰\r\n - ใƒชใ‚ขใƒซใ‚ฟใ‚คใƒ ใง้ฉๅˆ‡ใช้‡ใฎๆƒ…ๅ ฑใ‚’ๆ็คบใ™ใ‚‹ๅฟ…่ฆใŒๅ‡บใ‚‹ใฎใงใ€ๅฎŸ็”จ็š„ใชๆ็คบๆ–นๆณ•ใŒ้›ฃใ—ใ„ใ€‚\r\n- E. AR ่•ๅœฐๅ›ณใจใฎ้€ฃๆบ\r\n - ่งฆๅœฐๅ›ณใง่งฆใ‚ŒใŸๅ ดๆ‰€ใ‚’็”ปๅƒ่ช่ญ˜ใ—ใ€ๅ‘จใ‚Šใฎๅบ—ใ‚’ใƒŠใƒ“ใ‚ฒใƒผใƒˆใ€‚\r\n - ๆ—…่กŒใฎไบˆ่กŒๆผ”็ฟ’ใฎใ‚ˆใ†ใชๆƒ…ๅ ฑใฎๆ็คบๆ–นๆณ•ใซใชใ‚‹ใ€‚\r\n\r\n่ฆ–่ฆš้šœๅฎณ่€…ใซๅฟ…่ฆใชๆƒ…ๅ ฑ๏ผˆๅœฐๅ›ณใซๆ›ธใ่พผใ‚€ในใๆƒ…ๅ ฑ๏ผ‰\r\n\r\n- ่ช˜ๅฐŽใƒ–ใƒญใƒƒใ‚ฏ๏ผˆ็‚นๅญ—ใƒ–ใƒญใƒƒใ‚ฏ๏ผ‰\r\n- ไบค้€šไฟกๅท๏ผˆ้Ÿณ้Ÿฟไฟกๅทใ€ๆญฉ่ปŠๅˆ†้›ขๅผไฟกๅท๏ผ‰๏ผš้Ÿณ้Ÿฟไฟกๅทใฏๅบƒๅ ดใฎใ‚ˆใ†ใชใจใ“ใ‚ใงใฏไธๅ‘ใ๏ผˆๅๅฐ„ใŒๅŠนใ๏ผ‰ใ€‚ๆญฉ่ปŠๅˆ†้›ขใฏๅŒใ˜้€ฒ่กŒๆ–นๅ‘ใฎ่ปŠใฎ้Ÿณใ‚’ๆ‰‹ๆŽ›ใ‹ใ‚Šใซๆญฉใใ“ใจใŒใงใใชใ„ใจใ„ใฃใŸๅ•้กŒใŒใ‚ใ‚‹ใ€‚\r\n- ใ‚จใƒฌใƒ™ใƒผใ‚ฟ\r\n- ใƒ•ใ‚ฟใฎ็„กใ„ๆบใ€ๆฐด่ทฏ\r\n- ๅคงๅž‹ๅปบ็‰ฉใฎๅ…ฅใ‚Šๅฃ\r\n\r\n็ฅžๆˆธใฎ็Šถๆณ\r\n\r\n- ๅธƒๅผ•ไบคๅทฎ็‚นใฎใ‚จใ‚นใ‚ณใƒผใƒˆใ‚พใƒผใƒณ๏ผš็‚นๅญ—ใƒ–ใƒญใƒƒใ‚ฏ๏ผˆ้–‹ใ‘ใฆๅ ดๆ‰€ใชใฎใง้Ÿณ้ŸฟไฟกๅทใฏๅŠนๆžœใŒ็„กใ„๏ผ‰\r\n- ็ฅžๆˆธใ‚ขใ‚คใ‚ปใƒณใ‚ฟใƒผ\r\n\r\nไปŠ้€ฑๆœซใ‚คใƒ™ใƒณใƒˆใ‚„ใ‚‹ใฎใงใ€่ˆˆๅ‘ณใฎใ‚ใ‚‹ๆ–นใฏ้€ฃ็ตกใ‚’๏ผ\r\n\r\n- Q. OSM ใธใฎ่จ˜่ฟฐใงใ€่ฆ–่ฆš้šœ็ข่€…็”จใง็‰นๅˆฅๅฟ…่ฆใชใ‚‚ใฎใฏใ‚ใ‚‹ใฎใ‹๏ผŸ\r\n- A. ไฟกๅทใƒป็‚นๅญ—ใƒ–ใƒญใƒƒใ‚ฏใƒป่ปŠๆญขใ‚\r\n\r\nใ—ใ‚ใ‚ใ›ใฎๆ‘ใงใฎๅฎŸ็ธพใƒ™ใƒผใ‚นใงใ€ใ“ใ‚“ใชใฎใŒๅฝนใซ็ซ‹ใฃใŸใ‚ˆใจใ„ใ†ใฎใ‚’ openstreetmap ใฎ wiki ใซใพใจใ‚ใฆใพใ™\r\n<https://wiki.openstreetmap.org/wiki/User:Higa4/JA:SidewalkMapping>\r\n\r\n- Q. ใ“ใ‚ŒใจๅŒๆง˜ใฎใ€Œๅฝนใซ็ซ‹ใคใ€่ฆ–่ฆš้šœ็ข่€…ๅ‘ใ‘ใฎใ‚ฟใ‚ฐใฏ๏ผŸ\r\n- A. 
ใพใ•ใซ่ฉฆ่กŒ้Œฏ่ชคไธญใ€‚ใชใ‹ใชใ‹้€ฒใพใชใ„ใ€‚\r\n\r\n\r\n# ไธ‰็”ฐๅธ‚ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ\r\n\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\nๅธ‚ๆฐ‘ๆดปๅ‹•ๆŽจ้€ฒใƒ—ใƒฉใ‚ถ ๅ”ๅŠ›ใฎๅ…ƒใ€ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผใ‚’ใ‚„ใฃใฆใฟใ‚ˆใ†ใจใ„ใ†่ฉฆใฟใ€‚\r\n\r\n6/24 ไธ‰็”ฐ้ง…ๅ‰ใ‚ญใƒƒใƒ”ใƒผใƒขใƒผใƒซ 6F\r\nไน…ไฟ็”ฐๅ„ชๅญใ•ใ‚“ โ† ใ™ใ”ใใงใใ‚‹ไบบใงใ™ใ€‚\r\n\r\nๆกˆๅ†…ใฏใ‚ฆใ‚งใƒ–ใ‚ตใ‚คใƒˆใฏใชใใฆใ€PDF ใŒใ‚ใ‚Šใพใ™ <https://www.facebook.com/groups/1536379276600668/1910312519207340/>\r\n\r\n\r\n# ้ ˆ็ฃจๆตทๅฒธใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ\r\n\r\nๆœจๆˆธใ•ใ‚“\r\n\r\nใ€ŒNHK ้šœใŒใ„่€…ใ‚ญใƒฃใ‚นใ‚ฟใƒผๅ‹Ÿ้›†ใ€ใซใ‚‚ใƒ“ใƒ“ใƒƒใจใใŸใ€‚ๆƒ…ๅ ฑ็™บไฟกใ‚’ใ‚„ใ‚ŠใŸใ„๏ผ\r\nCode for Kobe ใงใฉใ†ใ‹ใ‹ใ‚ใ‚ŠใŸใ„ใ‹\r\nๅฐใ•ใ„้ ƒใฏ็ฅžๆˆธใงใ‚ตใƒƒใ‚ซใƒผใ‚’ใ‚„ใฃใฆใ„ใŸใ€‚็ญ‘ๆณขๅคงๅญฆใ‚ตใƒƒใ‚ซใƒผ้ƒจใ€‚็พใƒ—ใƒญใจใ‚‚่ฉฆๅˆใ‚’ใ—ใŸใ“ใจใ‚‚ใ€‚\r\n\r\n็พๅœจ้šœใŒใ„่€…ใงๆƒ…ๅ ฑ็™บไฟกๅŠ›ใฎใ‚ใ‚‹ไบบใฏใ€ๅฎŸใฏๅ…ˆๅคฉใฎไบบใŒๅคšใใ€ๆƒ…ๅ ฑ็™บไฟกใฎใ‚ญใƒฃใƒชใ‚ข๏ผˆใ‹ใ‘ใฆใ„ใ‚‹ๆ™‚้–“๏ผ‰ใŒ้•ใ†ใ€‚ใใ‚Œใจๆฏ”่ผƒใ™ใ‚‹ใจใ€ๅฅๅธธ่€…ใ‹ใ‚‰้šœใŒใ„่€…ใชใฃใŸไบบใซใ‚ˆใ†ใ‚‹ๆƒ…ๅ ฑ็™บไฟกใŒๅผฑใ„ใ€‚\r\n\r\nๅฐ‘ใ—ๅ‰ใพใงใฏๅšๅ ฑๅ ‚ๅ‹คๅ‹™ใ€‚2ๅนดๅ‰ไบ‹ๆ•…ใง่ปŠใ„ใ™็”Ÿๆดปใซใ€‚ใใ“ใ‹ใ‚‰ๅง‹ใ‚ใฆใ„ใ‚‹ๆดปๅ‹•ใŒ2ใคใ€‚\r\n\r\n- ใƒชใ‚ฆใ‚ฉใƒผใ‚ฏใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ\r\n - <http://rwpj.jp/> ไธ€ไบบใงๅญค็‹ฌใซ้ ‘ๅผตใ‚‹ใฎใฏ่พ›ใ„ใฎใงใ€ใฟใ‚“ใชใงๆ›ธใ่พผใ‚ใ‚‹ๅ ดๆ‰€ใŒๆฌฒใ—ใ„\r\n - ๆ—ฅๆœฌใงใฏใƒชใƒใƒ“ใƒชใŒ้›ฃใ—ใ„๏ผˆใ•ใ›ใฆใ‚‚ใ‚‰ใˆใชใ„๏ผ‰\r\n - ๆœฌใฎๅˆถไฝœ๏ผˆๆณขใŒๆฅใŸใ‚‰ๅ‡บ็‰ˆใ—ใŸใ„ใชโ€ฆ๏ผ‰\r\n- ใ‚คใƒ™ใƒณใƒˆใฎ้–‹ๅ‚ฌ\r\n - ็ฅžๆˆธใƒป้ ˆ็ฃจๆตทๅฒธใซใƒ“ใƒผใƒใƒžใƒƒใƒˆใ‚’ๅฐŽๅ…ฅใ—ใŸใ„๏ผ ๏ฝž่ปŠใ„ใ™ใงใ‚‚ๆฅฝใ—ใ‚ใ‚‹ใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ“ใƒผใƒใ‚’็›ฎๆŒ‡ใ—ใฆ๏ฝž\r\n - <https://greenfunding.jp/lab/projects/1866>\r\n - ๅฅๅธธ่€…ใจ้šœใŒใ„่€…ใฎๆŽฅ็‚นใ‚’ๅข—ใ‚„ใ—ใฆใ€็คพไผšใซๆบถใ‘่พผใ‚ใ‚‹ใ‚ˆใ†ใซใ—ใฆใ„ใใŸใ„ใ€‚ใˆใฆใ—ใฆๅŒบๅˆ†ๅŒ–ใ•ใ‚Œใ‚‹ใŒใ€ใใ†ใงใฏใชใ„ๆ–นๅ‘ใงใ„ใใŸใ„ใ€‚\r\n - ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ in ้ 
ˆ็ฃจ\r\n\r\n\r\n# Code for Japan Summit 2017 ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\n\r\n- Code for Japan ใฎไบ‹ๆฅญใงใ™\r\n- ไปŠๅนดใฏ็ฅžๆˆธ้–‹ๅ‚ฌใง Code for Kobe ๅ…ฑๅ‚ฌใซใชใฃใฆใ„ใพใ™\r\n- ใƒญใ‚ดไธŠใŒใฃใฆใใพใ—ใŸ๏ผ\r\n- ๅ‚ๅŠ ใฏ้šๆ™‚ๅ‹Ÿ้›†ไธญใงใ™๏ผ\r\n- Code for Kobe ๆž ไฝœใ‚ŠใŸใ„ใฎใง๏ฝž๏ผˆใ“ใ“ใงๆ™‚้–“ๅˆ‡ใ‚Œ๏ผ‰\r\n\r\n" }, { "alpha_fraction": 0.6555292010307312, "alphanum_fraction": 0.6822590827941895, "avg_line_length": 16.050582885742188, "blob_id": "7b5738d6cf20feba5543b134ed5bd190515270e2", "content_id": "d929383979f233df5aa65189e601a8bc68764aed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9486, "license_type": "no_license", "max_line_length": 81, "num_lines": 257, "path": "/_posts/2017-12-21-meeting34.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš34th -Happy 3rd Anniversary-\r\ndate: 2017-12-21 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)Urban Innovation Kobe็ตๆžœๅ ฑๅ‘Š(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)10ๅˆ†\r\n (2)ใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017๏ฝžใใ—ใฆๆๅ‡บใธ๏ฝž(COGๅ‚ๅŠ ่€…)10ๅˆ†\r\n (3)ใฒใ‚‡ใ†ใ”ใตใ‚Œใ‚ใ„ใƒ‘ใƒˆใƒญใƒผใƒซLT(็ซนๆž—)10ๅˆ†\r\n (4)Kobe ร— Barcelona World Data Viz Challenge 2017(่ชฟๆ•ดไธญ)90ๅˆ†\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1931881857086373/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/HJ4QFZFff)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=3929)]\r\n\r\n\r\n# Code for Kobeๅฎšไพ‹ไผš34th -Happy 3rd Anniversary-\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n<http://www.kigyoplaza-hyogo.jp/>\r\n\r\n2.ใŠๅ“ๆ›ธใ\r\n\r\n- ใ€œไนพๆฏ๏ผˆใ‚ฑใƒผใ‚ญใ‚ซใƒƒใƒˆ๏ผ‰๏ฝž\r\n- (1)Urban Innovation Kobe็ตๆžœๅ ฑๅ‘Š(่–ฌๅธซๅฏบใจๆ„‰ๅฟซใชไปฒ้–“้”)10ๅˆ†\r\n- 
(2)ใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017๏ฝžใใ—ใฆๆๅ‡บใธ๏ฝž(COGๅ‚ๅŠ ่€…)10ๅˆ†\r\n- (3)ใฒใ‚‡ใ†ใ”ใตใ‚Œใ‚ใ„ใƒ‘ใƒˆใƒญใƒผใƒซLT(็ซนๆž—)10ๅˆ†\r\n- (4)Kobe ร— Barcelona World Data Viz Challenge 2017(่ชฟๆ•ดไธญ)90ๅˆ†\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n\r\n## Urban Innovation Kobe็ตๆžœๅ ฑๅ‘Š\r\n่–ฌๅธซๅฏบใ•ใ‚“\r\n\r\n## ใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2017๏ฝžใใ—ใฆๆๅ‡บใธ๏ฝž\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nๆๅ‡บใ—ใพใ—ใŸ๏ผ\r\n็ฅžๆˆธๅธ‚ใŒๅ‹Ÿ้›†ใ—ใฆใ„ใชใ‹ใฃใŸใŒใ€ไธ‰็”ฐๅธ‚ใฏๅ‹Ÿ้›†ใ—ใฆใ„ใŸใฎใงไธ‰็”ฐๅธ‚ใซๆๅ‡บ\r\n\r\nไธ‰็”ฐๅธ‚ใฎ่ชฒ้กŒ\r\n* ่พฒๆ‘ๅœฐๅŸŸใ‚„ใ€ใƒ‹ใƒฅใƒผใ‚ฟใ‚ฆใƒณใชใฉ็•ฐใชใ‚‹ๅœฐๅŸŸใฎ็‰นๆ€งใซๅฟœใ˜ใŸไบค้€šใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใฎๆง‹็ฏ‰ใจๅค–ๅ‡บๆ”ฏๆดใฎใ‚ใ‚Šๆ–นใซใคใ„ใฆ\r\n* <http://park.itc.u-tokyo.ac.jp/padit/cog2017/area/kinki.html#sanda-shi>\r\n* ่ชฒ้กŒใŒใตใ‚ใฃใจใ—ใฆใ„ใฆ้›ฃใ—ใ‹ใฃใŸ\r\n* ้›ฒๅ—ใฎๅœฐๅŸŸ็ต„็น”ใ‚’ๅ‚่€ƒใซใ—ใŸ่งฃๆฑบๆ–นๆณ•\r\n\r\n## ใฒใ‚‡ใ†ใ”ใตใ‚Œใ‚ใ„ใƒ‘ใƒˆใƒญใƒผใƒซLT\r\n็ซนๆž—ใ•ใ‚“\r\n\r\n* ใฒใ‚‡ใ†ใ”ใตใ‚Œใ‚ใ„ใƒฉใƒณใƒ‹ใƒณใ‚ฐใƒ‘ใƒˆใƒญใƒผใƒซ(ใตใ‚Œใƒ‘ใƒˆ)\r\n * <https://www.facebook.com/hyogo2020patrol/>\r\n * ่‡ชไธป็š„ใชใƒฉใƒณใƒ‹ใƒณใ‚ฐใ‚’้€šใ—ใฆ้˜ฒ็Šฏ\r\n * ็œŒๅ†…ใฎ็Šฏ็ฝชไปถๆ•ฐใฏๅนณๆˆ14ๅนดใŒใƒ”ใƒผใ‚ฏ\r\n * -4% ็จ‹ๅบฆใฎ้€ฃ็ถšๆธ›ๅฐ‘ๅ‚พๅ‘ใซใ‚ใ‚‹\r\n * ใจใฏใ„ใˆใ€ไธๅฏฉ่€…ๆƒ…ๅ ฑใŒๅคšใ„\r\n * ใƒœใƒฉใƒณใƒ†ใ‚ฃใ‚ขใฎ่ฆ‹ๅฎˆใ‚ŠใฎใŠใ‹ใ’ใงๆธ›ใฃใฆใใŸ\r\n * ใ€Œ้’่‰ฒ้˜ฒ็Šฏใƒ‘ใƒˆใƒญใƒผใƒซๆดปๅ‹•ใ€ใชใฉใŒใ‚ใฃใŸ\r\n * ใƒกใƒณใƒใƒผใŒๅ›บๅฎšๅŒ–ใ—ใฆใ€้ซ˜้ฝขๅŒ–\r\n * ไธ–ไปฃไบคไปฃใŒใงใใฆใ„ใชใ„\r\n * ใชใ‚“ใฉใ‹ๆ‰“็ ดใงใใชใ„ใ‹\r\n * ็ฅžๆˆธใƒžใƒฉใ‚ฝใƒณใ€ๅงซ่ทฏใƒžใƒฉใ‚ฝใƒณใชใฉใงใƒฉใƒณใƒ‹ใƒณใ‚ฐใ™ใ‚‹ไบบใŒๅข—ใˆใฆใใŸ\r\n * ้˜ฒ็Šฏใซๆดปใ‹ใ›ใชใ„ใ‹๏ผŸ\r\n * ๏ผ˜ๅ›žใปใฉๅฎŸๆ–ฝ\r\n* ็‰นๅพด\r\n * ใ‚ฐใƒซใƒผใƒ—ใง๏ผˆ้˜ฒ็ŠฏไธŠใฎ็†็”ฑใ€็›ฎ็ซ‹ใคใ€ๆฅฝใ—ใ„๏ผ‰\r\n * ใ‚†ใฃใใ‚Šใจ๏ผˆใƒšใƒผใ‚น๏ผ‰\r\n* ๆœŸๅพ…ใ•ใ‚Œใ‚‹ๅŠนๆžœ\r\n * ไฝ“ๆ„Ÿๆฒปๅฎ‰ใฎๅ‘ไธŠ\r\n* ้ญ…ๅŠ›๏ผˆใ‚ขใƒณใ‚ฑใƒผใƒˆใ‹ใ‚‰๏ผ‰\r\n * 
่ก—ใฎๅฎ‰ๅ…จใซๅฏ„ไธŽ\r\n * ่ถฃๅ‘ณใงไบบใฎๅฝนใซ็ซ‹ใค\r\n * ไบคๆต\r\n * ๅฅๅบทๅข—้€ฒ\r\n* ๆˆๅŠŸใ•ใ›ใ‚‹ใซใฏๆ”ฏๆดใŒๅฟ…่ฆ\r\n * ็พ็Šถ\r\n * ใ€Œใ‚„ใ‚Šใพใ—ใ‚‡ใ†ใ€ใจๆŽ›ใ‘ๅฃฐใ—ใŸๅพŒใฎใƒ•ใ‚ฉใƒญใƒผใŒใงใใฆใ„ใชใ„\r\n * ใƒ“ใƒ–ใ‚นใฎๆไพ›ใใ‚‰ใ„ใ—ใ‹ใงใใฆใ„ใชใ„\r\n * ้˜ฒ็Šฏๆƒ…ๅ ฑใฎๆไพ›\r\n * ใƒ‘ใƒˆใƒญใƒผใƒซๅฎŸ็ธพใฎ่ฆ‹ใˆใ‚‹ๅŒ–\r\n * ใƒฉใƒณใƒŠใƒผๅŒๅฃซใฎใ‚ณใƒŸใƒฅใƒ‹ใ‚ฑใƒผใ‚ทใƒงใƒณ\r\n * facebookใฎใ„ใ„ใญใชใฉ\r\n * ICTใงใชใซใ‹ใงใใชใ„ใ‹๏ผŸ\r\n\r\n## Kobe ร— Barcelona World Data Viz Challenge 2017\r\n\r\n### ไธญ้–“็™บ่กจ\r\n้•ทไบ•ใ•ใ‚“๏ผ ็ฅžๆˆธๅธ‚ไผ็”ป่ชฟๆ•ดๅฑ€\r\n- ASICS ใ‚นใƒžใƒผใƒˆใ‚‰ใ‚คใƒณใƒ‹ใƒณใ‚ฐใ‚ณใƒผใ‚น\r\n- docomo ่ฆ‹ๅฎˆใ‚Šใ‚ตใƒผใƒ“ใ‚น\r\n- ใƒใƒซใ‚ปใƒญใƒŠๅธ‚\r\n- ็ฅžๆˆธๅคงๅญฆ ้žๅธธๅ‹ค่ฌ›ๅธซ\r\n\r\nใƒใƒซใ‚ปใƒญใƒŠๅธ‚ใƒฌใƒใƒผใƒˆ\r\n- ็‹ฌ็ซ‹ๅ•้กŒ็œŸใฃใŸใ ไธญ\r\n- ไบบๅฃใฏ็ฅžๆˆธๅธ‚ใซ่ฟ‘ใ„\r\n- ใ‚นใƒšใ‚คใƒณใงใฏใƒžใƒ‰ใƒชใƒผใƒ‰ใซๆฌกใ\r\n- ่ฒกๆ”ฟไบˆ็ฎ—ใฏ็ฅžๆˆธๅธ‚ใฎ 4 ๅ€็จ‹ๅบฆใ‚ใ‚‹\r\n\r\nใ€Œใ‚นใƒžใƒผใƒˆใ‚ทใƒ†ใ‚ฃใ‚จใ‚ญใ‚นใƒใ€ใซๅ‡บๅบ—ใ—ใฆใใŸ\r\n\r\nใƒใƒซใ‚ปใƒญใƒŠ้ƒฝๅธ‚็”Ÿๆ…‹ๅญฆๅบ\r\n- ๅธ‚ใฎๅค–้ƒญๅ›ฃไฝ“\r\n- ใ‚ณใƒผใƒซใ‚ปใƒณใ‚ฟใƒผใ‚‚ๅซใ‚ใฆใ€ใ‚ปใƒณใ‚ตใƒผใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใฎใƒ‡ใƒผใ‚ฟใ‚‚่“„็ฉใ—ใฆใ„ใ‚‹\r\n- ่ทๅ“กใŒๅˆ†ๆž=>ๆ”ฟ็ญ–ๆๆกˆ\r\n- ้ƒฝๅธ‚ใฎๅคšๆง˜ๆ€งใŒไธŠใŒใ‚ŒใฐไธŠใŒใ‚‹ใปใฉ็ซถไบ‰ๅŠ›ใŒไธŠใŒใ‚‹ใจใ„ใ†็™บๆƒณ\r\n- ๆ–‡ๅŒ–ใ‚„ๆƒ…ๅ ฑใฎไบคๆ›ใŒใชใ•ใ‚Œใ‚‹ใƒ‘ใƒ–ใƒชใƒƒใ‚ฏใ‚นใƒšใƒผใ‚นใ‚’็ฉๆฅต็š„ใซๅฐŽๅ…ฅ\r\n\r\nใจใ‚Šใ‚ใˆใš่กŒใฃใฆใฟใŸใจใ“ใ‚ใงใฏใ€็‹ฌ็ซ‹ใซ้–ขใ™ใ‚‹่’ใ‚ŒใŸๆ„Ÿใ˜ใฏใชใ‹ใฃใŸใ€‚\r\n\r\n็ฅžๆˆธใƒฉใ‚ฆใƒณใƒ‰๏ผˆ๏ผ’ๆœˆ๏ผ‘๏ผ—ๆ—ฅใ€œ๏ผ‘๏ผ˜ๆ—ฅ@KIITO๏ผ‰\r\n\r\n25ๅ‘จๅนดใฎไบ‹ๆฅญใจใ—ใฆใ‚‚ใงใใ‚‹ใ‚ˆใ†ใซใ—ใ‚ˆใ†ใจใ—ใฆใ„ใ‚‹\r\n\r\nใ‚นใƒžใƒผใƒˆใ‚ทใƒ†ใ‚ฃ EXPO ใธใฎๅ‡บๅฑ•\r\n- JETRO ใฎใƒ–ใƒผใ‚น\r\n- Viz Challenge ใฎใ‚ขใ‚ฏใ‚ปใ‚นใƒ‘ใ‚นใจใฎใƒใƒผใ‚ฟใƒผใจใ„ใ†้ขใ‚‚ใ‚ใฃใŸ\r\n- docomo, yahoo,ใ€€ใ‚คใƒณใƒ•ใ‚ฉใƒฉใ‚ฆใƒณใ‚ธใ€็ฅžๆˆธๅธ‚ใฎๅ…ฑๅŒๅ‡บๅฑ•\r\n- ่ฆๆจกใฏใฉใ‚“ใฉใ‚“ๅคงใใใชใฃใฆใใฆใ„ใ‚‹\r\n - 
ๆ—ฅๆœฌใฎ่‡ชๆฒปไฝ“ใ‚‚่ค‡ๆ•ฐๅ‡บใฆใใฆใ„ใ‚‹\r\n\r\n\r\n### ่จชๆ—ฅๅค–ๅ›ฝไบบๅ‘ใ‘\r\n้ซ˜ๅฒธใ•ใ‚“๏ผ ๅฏŒๅฃซ้€š\r\n\r\nใŠใ‚‚ใฆใชใ—ใฎๆ—ฅๆœฌใ‚’ๆ„Ÿใ˜ใฆใ‚‚ใ‚‰ใˆใ‚‹่ณผ่ฒทไฝ“้จ“ใ‚’ๆไพ›ใ™ใ‚‹\r\n- ่จ€่ชž\r\n- ๆทท้›‘\r\n - 3ๅˆ†ใŸใคใจใ‚คใƒฉใ‚คใƒฉใ—ๅง‹ใ‚ใ‚‹\r\n - ๅ…็จŽใƒฌใ‚ธใฏๅนณๅ‡10ๅˆ†ใงใ€ใปใผใพใกใŒใ„ใชใใ‚คใƒฉใ‚คใƒฉใ—ใฆใ„ใ‚‹ใฏใš\r\n\r\nใŸใ„ใฆใ„่ฒทใ„ใŸใ„ใ‚‚ใฎใฎๅ†™็œŸใ‚’ใ‚นใƒžใƒ›ใซไฟๅญ˜ใ—ใฆใ€่จชๆ—ฅใ—ใฆใ„ใ‚‹ใ€‚\r\n\r\n็”ปๅƒใ‹ใ‚‰ๅ•†ๅ“ใ‚’ๆกˆๅ†…ใ™ใ‚‹ๆฉŸ่ƒฝใ‚’ๅฎŸ่ฃ…ใ™ใ‚‹\r\n\r\n็ตŒ็”ฃ็œใฎใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใงใ€ใƒใƒผใ‚ณใƒผใƒ‰ใ‹ใ‚‰ๅ•†ๅ“ใ‚’่กจ็คบใ™ใ‚‹\r\n\r\nECใ‚ตใ‚คใƒˆไฝœใฃใฆใ€ใใ“ใง้ธใ‚“ใงใ‚‚ใ‚‰ใฃใฆใ€ใƒ‘ใƒƒใ‚ญใƒณใ‚ฐใพใงใ—ใฆใ€ๅบ—่ˆ—ใ€ใ‚ณใƒณใƒ“ใƒ‹ใงๅ—ใ‘ๅ–ใ‚Œใ‚‹ใ‚ˆใ†ใซใ™ใ‚Œใฐๅพ…ใกๆ™‚้–“ใŒใชใใชใ‚‹ใฎใงใฏ๏ผŸ\r\n\r\n\r\n### Kobe Demographics API\r\nไฝๅคใ•ใ‚“๏ผ†้ฆ™ๅทใ•ใ‚“\r\n\r\nไปŠใฎไบบๅฃ็ตฑ่จˆใƒ‡ใƒผใ‚ฟ\r\n- ใƒŸใ‚ฏใƒญใชไบบๅฃ็ตฑ่จˆใƒ‡ใƒผใ‚ฟใŒใชใ„\r\n\r\nใƒŸใ‚ฏใƒญใชไบบๅฃ็ตฑ่จˆใƒ‡ใƒผใ‚ฟใ‚’ๅใๅ‡บใ™API\r\n- ไฝๆฐ‘ๅŸบๆœฌๅฐๅธณ๏ผˆ็”บไธ็›ฎใ€ๅนด้ฝข๏ผ‰\r\n - Excel, PDFใง็”จๆ„ใ•ใ‚Œใฆใ„ใ‚‹\r\n - ใ€€Yahoo geocoding ใจ้€ฃๆบใ•ใ›ใฆใ€็ทฏๅบฆ็ตŒๅบฆใ‚’ๅˆใ‚ใ›ใ‚‹\r\nๅฏ่ฆ–ๅŒ–\r\n- ไบบๅฃ็ตฑ่จˆใƒ‡ใƒผใ‚ฟใ‚’ใƒ’ใƒผใƒˆใƒžใƒƒใƒ—ใง่กจ็คบ\r\n - ็ทฏๅบฆ็ตŒๅบฆใ€ๅŠๅพ„ใชใฉใฎ็ฉบ้–“ๆŒ‡ๅฎšใ‚‚ๅฏ่ƒฝ\r\n\r\nๅฟœ็”จไพ‹\r\n- ๅ…ฌๅ…ฑๆ–ฝ่จญใฎใƒžใƒผใ‚ฑใƒ†ใ‚ฃใƒณใ‚ฐ๏ผˆใฉใ“ใซๅปบใฆใ‚‹ใจใ‚ˆใ„ใ‹๏ผ‰\r\n\r\nQ&A\r\n- ๆ–ฝ่จญใซๅ…ฅใฃใฆใ„ใ‚‹ไบบใฏๅˆฅใฎๅฏพๅฟœใ‚’ใ—ใŸใปใ†ใŒใ„ใ„ใ‹ใ‚‚\r\n- ใ‚คใƒ™ใƒณใƒˆใฎๆๆกˆใพใงใ—ใฆใใ‚Œใ‚‹ใจ้žๅธธใซใ‚ใ‚ŠใŒใŸใ„\r\n- ้šœๅฎณใฎๆœ‰็„กใพใงๅ…ฅใ‚‹๏ผŸ\r\n - ใ‚คใƒ™ใƒณใƒˆ้–‹ๅ‚ฌใฎ็›ฎๅฎ‰ใซใชใ‚‹\r\n\r\n### ็ตฆ้ฃŸ่‚ฒ\r\nๅ‰็”ฐใ•ใ‚“\r\n\r\n- ็ตฆ้ฃŸ่ฒปใ‚’ๅŒใ˜ใซใ™ใ‚‹ใซใฏ้‡ใ‚’ๆธ›ใ‚‰ใ™๏ผŸ\r\n- ็ตฆ้ฃŸ๏ผ‹้ฃŸ่‚ฒใฎใ‚ขใƒ—ใƒช\r\n\r\n- ๆ „้คŠใƒใƒฉใƒณใ‚นใ‚’่ฆ–่ฆš็š„ใซ่กจ็คบ\r\n - APIใงๅ„ๆ „้คŠใ‚’่จˆ็ฎ—\r\n - ๏ผ‘ๆ—ฅใ‚ใŸใ‚Šใฎๅฟ…่ฆ้‡ใจใ‹ใจๆฏ”่ผƒ\r\n - ๅญไพ›ใŸใกใซ้ฃŸใซๅฏพใ™ใ‚‹่ˆˆๅ‘ณใ‚’ๆŒใฃใฆใ‚‚ใ‚‰ใ†\r\n - 
ไฟ่ญท่€…ใซใ‚‚็Ÿฅใฃใฆใ‚‚ใ‚‰ใ†\r\n\r\nQ&A\r\n- ็”Ÿ้ง’ๅธ‚ใฎใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใงใ‚ขใƒ—ใƒชใ‚’ไฝœๆˆไธญ๏ผ ๅฅˆ่‰ฏๅ…ˆ\r\n - Code for Ikoma\r\n - <https://www.code4ikoma.org/?p=155>\r\n - ใ‚ขใƒฌใƒซใ‚ฒใƒณใชใฉใ‚‚ใ‚ใ‚Šใ€CSVใงๅ‡บใ•ใ‚Œใฆใ„ใ‚‹\r\n - ็Œฎ็ซ‹่กจใฏไฟ่ญท่€…ใฏใ‚ใพใ‚Š่ฆ‹ใฆใ„ใชใ„\r\n - ใ‚ขใƒ—ใƒชใงๅˆใ‚ใฆ็Ÿฅใฃใฆ้ฉšใ\r\n - ๅฅˆ่‰ฏใฎ็ตฆ้ฃŸๅ›ฃไฝ“๏ผŸ\r\n - ๆƒ…ๅ ฑๅ…ฌ้–‹่ซ‹ๆฑ‚ใ‚’ใ—ใฆใ‚‹\r\n - ใ‹ใชใ‚ŠๅŽณใ—ใ่ฆ‹ใฆใ‚‹\r\n - ใŠๆฎ‹ใ—ใฎใƒ‡ใƒผใ‚ฟ\r\n - ้ฃŸ่‚ฒใฎๅทฅๅคซใงๆ”นๅ–„ใงใใ‚‹\r\n\r\n### ๆฒปๅฎ‰ใฎๅฏ่ฆ–ๅŒ–\r\nๆพๅ…ƒใ•ใ‚“๏ผˆ๏ผ†ไผŠ่—คใ•ใ‚“๏ผ‰\r\n\r\n- ไฝใ‚€ๅ ดๆ‰€ใ‚’้ธใถใซใ‚ใŸใฃใฆๆฒปๅฎ‰ใฏๅคงไบ‹\r\n- ๅ€‹ไบบใซใ‚ˆใฃใฆๅŸบๆบ–ใŒ้•ใ†\r\n - ็Šฏ็ฝชไปถๆ•ฐ\r\n - ้จ’้Ÿณ\r\n - ๅคœใฎๆ˜Žใ‚‹ใ•\r\n - ไธๅฏฉ่€…็›ฎๆ’ƒไปถๆ•ฐ\r\n- ใ„ใ‚ใ„ใ‚ใชๆŒ‡ๆจ™ใ‚’้›†ใ‚ใŸ็ทๅˆ็š„ใชๆŒ‡ๆจ™ใง่ฉ•ไพกใงใใ‚‹ใ‚ˆใ†ใซใ—ใŸใ„\r\n\r\nQ&A\r\n- ็…งๅบฆใ‚’่ชฟในใ‚‹ใ ใ‘ใงใ‚‚ใ‹ใชใ‚Šๆทฑใ„\r\n - ใ‚ธใƒฃใƒผใƒŠใƒซใŒ๏ผ’ๆœฌๅˆ†ใใ‚‰ใ„\r\n - ๆ˜Žใ‚‹ใ„ๅ ดๆ‰€ใŒๅคšใ„ไธญใฎๆš—ใ„ๅ ดๆ‰€ใจใ€ใใ‚‚ใใ‚‚ๆš—ใ„ๅ ดๆ‰€ใŒๅคšใ„ใจใ“ใ‚ใฎๅ•้กŒใฏ็•ฐใชใ‚‹\r\n\r\n### ReW(ใƒชใƒผใƒ€ใƒ–ใƒซใ€€= Referring to Review)\r\nๆดชใ•ใ‚“\r\n\r\nๅ•†ๅ“ใƒฌใƒ“ใƒฅใƒผๆƒ…ๅ ฑใ‹ใ‚‰ๆœ‰็”จใชๆƒ…ๅ ฑใ‚’ๆŠฝๅ‡บใ—ใฆไฝฟใ†\r\n- ๅคง่ก†ใฎๆบ€่ถณ\r\n- ๅคง่ก†ใฎไธๆบ€\r\n- ไธ€้ƒจใฎไบบใฎๆบ€่ถณ\r\n- ใใฎไธญใ‹ใ‚‰ๅ˜่ชžใ‚’่ฆ‹ใคใ‘ๅ‡บใ—ใฆๅฏ่ฆ–ๅŒ–\r\n\r\nๅฏพๅฟœๅˆ†ๆž\r\n- ๅคšๅค‰้‡่งฃๆžๆณ•ใฎไธ€ใค\r\n- ใ€Œๆบ€่ถณใ€ใ‚’ใ‚ฏใƒฉใ‚นใ‚ฟใƒชใƒณใ‚ฐใ™ใ‚‹\r\n - ๅคง่ก†ใฎๆบ€่ถณใ€ใƒžใƒ‹ใ‚ขใฎๆบ€่ถณใ€ใจใ„ใฃใŸใ‚ฐใƒซใƒผใƒ”ใƒณใ‚ฐ\r\n - ใ‚ฐใƒฉใƒ•ๅŒ–ใ—ใŸๆ™‚ใซ็œŸใ‚“ไธญใซใงใ‚‹ใจใ€Œๅคง่ก†ใ€ใ€็ซฏใซๅ‡บใ‚‹ใ‚‚ใฎใฏใ€Œไธ€้ƒจใฎไบบใ€ใจ่ฆ‹ใ‚Œใ‚‹\r\n\r\nQ&A\r\n- ๅฝขๆ…‹็ด ใ ใ‘ใงใชใใƒˆใƒ”ใƒƒใ‚ฏใง่ฆ‹ใฆใฟใ‚‹ใฎใ‚‚ใ„ใ„\r\n- ๆ„Ÿๆƒ…ๅˆ†ๆžใ‚‚ๆœ‰ๅŠน\r\n - ้ซ˜ใ„่ฉ•ไพกใฎไธญใงใฎใƒžใ‚คใƒŠใ‚น่ฉ•ไพก\r\n - ไฝŽใ„่ฉ•ไพกใฎไธญใงใฎใƒ—ใƒฉใ‚น่ฉ•ไพก\r\n- ๆ—ฅๆœฌ่ชžใ‚ˆใ‚Š่‹ฑ่ชžใชใฉใฎๅค–ๅ›ฝ่ชžใฎๆ–นใŒ่กจ็พใฎๆบใ‚ŒใŒๅฐ‘ใชใ„ใ‹ใ‚‚ใ—ใ‚Œใชใ„\r\n- 
ใ‚ญใƒผใƒฏใƒผใƒ‰ใ‚’่“„็ฉใ™ใ‚Œใฐใ€้•ใ†ๆ–นๆณ•ใซไฝฟใˆใ‚‹ใฎใงใฏ\r\n - ใ‚ใ‚‹ใ‚ญใƒผใƒฏใƒผใƒ‰ใ‚’ๆŒ‡ๅฎšใ™ใ‚‹ใจ่ฆณๅ…‰ๅœฐใŒๅ‡บใฆใใ‚‹ใชใฉ\r\n### ONE'S ROUTE\r\nๆธก้‚Šใ•ใ‚“\r\n\r\n็ฅžๆˆธๅธ‚ใฎๅ•้กŒ\r\n- ๅ‚ใŒๆ€ฅ\r\n- ้€šๅญฆ่ทฏใ‚‚ใ—ใ‚“ใฉใ„\r\n\r\n็งปๅ‹•่€…ใฎใƒ‹ใƒผใ‚บใ‚’ๆบ€ใŸใ™ใƒซใƒผใƒˆใ‚’ๆๆกˆ\r\n- ใ‚นใƒ†ใƒผใ‚ฟใ‚นๅค‰ๆ•ฐ\r\n - ๅฑ‹ๆ นใฎๆœ‰็„กใ€ๅ‚พๆ–œใ€่ˆ—่ฃ…ใ€ๆฎตๅทฎใชใฉ\r\n\r\n้กžไผผใ‚ตใƒผใƒ“ใ‚น\r\n- Flattest Route\r\n - ๅ‚พๆ–œใ‚’ใ‚ฐใƒฉใƒ•ใงๅฏ่ฆ–ๅŒ–\r\n- Wheel map\r\n - ่ปŠๆค…ๅญใงใ‚ขใ‚ฏใ‚ปใ‚นๅฏ่ƒฝใ‹ใฉใ†ใ‹ใ‚’่กจ็คบ\r\n\r\nQ&A\r\n- ๅˆฐ็€ๅœฐใ€ๆ‰€่ฆๆ™‚้–“ใ‚’ๆฑบใ‚ใฆใ€ใ‚ใจใฏๅฏ„ใ‚Š้“ใ—ใชใŒใ‚‰ใงใใ‚‹ใƒซใƒผใƒˆใ‚’ๅ‡บใ—ใฆใใ‚Œใ‚‹ใจใ‚ใ‚ŠใŒใŸใ„\r\n- ๅคง้˜ชๆข…็”ฐๅœฐไธ‹ใƒžใƒƒใƒ—\r\n\r\n### ๅ…จไฝ“ใฎ่ฌ›่ฉ•\r\n้•ทไบ•ใ•ใ‚“ใ€€ๆปๆ‘ใ•ใ‚“\r\n\r\nๅธ‚ๆฐ‘ใƒปๆฐ‘้–“ใฎใƒ‹ใƒผใ‚บใ‚’ๆŠŠๆกใ™ใ‚‹ใ“ใจใŒๅคงไบ‹\r\n" }, { "alpha_fraction": 0.7118831872940063, "alphanum_fraction": 0.750829815864563, "avg_line_length": 17.461206436157227, "blob_id": "b11185431593c32dd5d324526bf0ef79c2e4a8f3", "content_id": "a582d317b4b9575b7b152f4f66beec807acfdc64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10054, "license_type": "no_license", "max_line_length": 159, "num_lines": 232, "path": "/_posts/2017-07-21-meeting30.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš30th\r\ndate: 2017-07-20 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ๆฑๅคงๅคงๅญฆ้™ข่ฌ›ๆผ”ๅ†็พ๏ฝž้•ทไบ•ใ€ๆฑๅคงใงๆŽˆๆฅญใ—ใฆใใŸใฃใฆใ‚ˆ๏ฝž(้•ทไบ•)\r\n (2)ใŸใ‚ใพใฃใทร—ใ‚ณใƒผใƒ—ใ“ใ†ใน(ๆธ…ๆฐด)\r\n (3)ใ‚ขใ‚ธใ‚ขใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใƒใƒƒใ‚ซใ‚ฝใƒณๅ‘Š็Ÿฅ(ใจใ‚Šใ‚„ใพ)\r\n (4)็คพไผš็š„่ชฒ้กŒ่งฃๆฑบๅž‹ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ๅ‹Ÿ้›†(ๆพๆ‘)\r\n (5)ใ‚ตใƒŸใƒƒใƒˆๅ†…Code for Kobeใ‚ปใƒƒใ‚ทใƒงใƒณใƒปใƒ–ใƒผใ‚นๆคœ่จŽ(่ฅฟ่ฐท)\r\n (6)CJS2017ไผ็”ป้‹ๅ–ถใ‚นใ‚ฟใƒƒใƒ•ๅ‹Ÿ้›†(้ซ˜ๆฉ‹)\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- 
log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/427132074353115/)\r\n/ [PaperๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-30th-meeting-DCgmoyXHNr346UnXduEAK)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=2988)]\r\n\r\n1.ๅ ดๆ‰€\r\n\r\n[็ฅžๆˆธใ‚ตใƒณใ‚ปใƒณใ‚ฟใƒผใƒ—ใƒฉใ‚ถ่ฅฟ้คจใ€€6Fใ€€ไผš่ญฐๅฎค4ๅทๅฎค](http://www.kscp.co.jp/room/index.html#a05)\r\n\r\n2.ใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (1)ๆฑๅคงๅคงๅญฆ้™ข่ฌ›ๆผ”ๅ†็พ๏ฝž้•ทไบ•ใ€ๆฑๅคงใงๆŽˆๆฅญใ—ใฆใใŸใฃใฆใ‚ˆ๏ฝž(้•ทไบ•):30ๅˆ†\r\n- (2)ใŸใ‚ใพใฃใทร—ใ‚ณใƒผใƒ—ใ“ใ†ใน(ๆธ…ๆฐด):20ๅˆ†\r\n- (3)ใ‚ขใ‚ธใ‚ขใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใƒใƒƒใ‚ซใ‚ฝใƒณๅ‘Š็Ÿฅ(ใจใ‚Šใ‚„ใพ):5ๅˆ†\r\n- (4)็คพไผš็š„่ชฒ้กŒ่งฃๆฑบๅž‹ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ๅ‹Ÿ้›†(ๆพๆ‘):5ๅˆ†\r\n- (5)ใ‚ตใƒŸใƒƒใƒˆๅ†…Code for Kobeใ‚ปใƒƒใ‚ทใƒงใƒณใƒปใƒ–ใƒผใ‚นๆคœ่จŽ(่ฅฟ่ฐท):55ๅˆ†\r\n- (6)CJS2017ไผ็”ป้‹ๅ–ถใ‚นใ‚ฟใƒƒใƒ•ๅ‹Ÿ้›†(้ซ˜ๆฉ‹):5ๅˆ† ใ‚ตใƒŸใƒƒใƒˆใงใฎCode for Kobeๆž ใ‚ปใƒƒใ‚ทใƒงใƒณใƒปใƒ–ใƒผใ‚นๆฑบใ‚ใพใ™ใ€‚ใ‚ญใƒƒใ‚บๅ‘ใ‘ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใ‚‚ไฝต่กŒใ—ใฆๆคœ่จŽใ—ใพใ™ใ€‚\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\n\r\nใŠใฒใจใ‚Šๆง˜1,000ๅ††๏ผˆๅญฆ็”Ÿไปฅไธ‹็„กๆ–™๏ผ‰\r\n\r\n\r\n# ๆฑๅคงๅคงๅญฆ้™ข่ฌ›ๆผ”ๅ†็พ๏ฝž้•ทไบ•ใ€ๆฑๅคงใงๆŽˆๆฅญใ—ใฆใใŸใฃใฆใ‚ˆ๏ฝž\r\n\r\n้•ทไบ•ใ•ใ‚“ / ็ฅžๆˆธๅธ‚ๅ‰ต้€ ้ƒฝๅธ‚ๆŽจ้€ฒ้ƒจ ICTๅ‰ต้€ ใƒปไบ‹ๆฅญๆŽจ้€ฒๆ‹…ๅฝ“ไฟ‚้•ท\r\n\r\n\r\n## ๆŽˆๆฅญๆฆ‚่ฆ\r\n\r\nๆฑไบฌๅคงๅญฆๅคงๅญฆ้™ขๅทฅๅญฆ็ณป็ ”็ฉถ็ง‘๏ผˆ้ƒฝๅธ‚ๅทฅๅญฆๅฐ‚ๆ”ป๏ผ‰\r\n้ƒฝๅธ‚ๆŒ็ถšๅ†็”Ÿๅญฆใ‚ณใƒผใ‚น ๆฑๅคงใพใกใฅใใ‚Šๅคงๅญฆ้™ขใ€Œ้ƒฝๅธ‚็ตŒๅ–ถๆˆฆ็•ฅ็ฌฌไธ€๏ผˆ้ƒฝๅธ‚ใฎ็”ฃๆฅญใจ็ตŒๅ–ถ๏ผ‰ใ€\r\n\r\n20-50ไปฃ\r\n็”ทๅฅณ18ๅ ๅฅณๆ€ง2ๅ‰ฒ\r\n\r\nใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฏใพใกใฅใใ‚Šใซใฉใ†่ฒข็Œฎใ™ใ‚‹ใ‹\r\nใƒปๆ”ฟ็ญ–ๅฝขๆˆใธใฎใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๅฎŸ็”จ็›ฎ้€”ใฏใ„ใคใ‹\r\nใƒปๅฎŸ่ฃ…ใ™ใ‚‹ใŸใ‚ใซใฏใ€ใ€Œ่ชฐใŒใ€ใ€Œไฝ•ใ‚’ใ€ใ—ใชใ‘ใ‚Œใฐใชใ‚‰ใชใ„ใ‹ใ€‚\r\nใ€Œใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใงไฝ•ใŒใงใใ‚‹ใ‹ใ€ใ€ใ€Œๆ”ฟ็ญ–ๅฝขๆˆใซๅฎŸ็”จใงใใ‚‹ใฎใฏใ„ใคใ‹ใ€\r\n\r\n\r\n## 
็ฅžๆˆธ 2020 ใƒ“ใ‚ธใƒงใƒณ\r\n\r\n2020 ๅนดใซๅ‘ใ‘ใŸๅฎŸ่กŒ่จˆ็”ป\r\n\r\n- +design\r\n- ใ‚ทใƒ“ใƒƒใ‚ฏใƒ—ใƒฉใ‚คใƒ‰\r\n- ICT\r\n- ใ‚คใƒŽใƒ™ใƒผใ‚ทใƒงใƒณ\r\n\r\nใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๆŽจ้€ฒใฎๅ–ใ‚Š็ต„ใฟ\r\nใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒกใƒณใƒˆ็คพไผšใฎๆง‹็ฏ‰ใ‚’็›ฎๆŒ‡ใ™\r\n\r\n- ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๆดป็”จใฎ็’ฐๅขƒๆ•ดๅ‚™\r\n - ใ‚คใƒ™ใƒณใƒˆใ‚ตใ‚คใƒˆใ€ŒKOBE Todayใ€๏ผšใ“ใ‚Œใพใง็ด™้ขๅฐๅˆทใ‚’ๅ‰ๆใซๅ–ๆจ้ธๆŠžใ•ใ‚Œใฆใ„ใŸใ‚‚ใฎใŒใ€ใใ†ใ„ใฃใŸๅˆถ้™ใŒ็„กใใชใฃใŸใ€‚\r\n- ใƒ‡ใƒผใ‚ฟใ‚’ๆดป็”จใงใใ‚‹ไบบๆ่‚ฒๆˆ\r\n - ๅธ‚่ทๅ“กๅ‘ใ‘ใ‚ปใƒŸใƒŠใƒผใƒปใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\n - ๅ…จ่ชฒ้•ทๅ‘ใ‘\r\n - ใ€Œในใฃใดใ‚“ใ‚นใ‚ฟใ‚คใƒซใ€ใชใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚’ใคใใ‚ใ†๏ผ\r\n - ใ™ใ้‡Žใ‚’ๅบƒใ’ใ‚‹ใ“ใจใซๆณจๅŠ›\r\n - ๅธ‚ๅ†…ใฎๅฅณๅญๅญฆ็”Ÿใƒปๅฅณๆ€งไผๆฅญ็คพๅ“กใƒปๅฅณๆ€ง่ทๅ“ก\r\n- ใƒ‡ใƒผใ‚ฟใ‚ขใ‚ซใƒ‡ใƒŸใƒผ\r\n - Yahoo! Japan\r\n - ๅนด้–“ใฎๆตใ‚Œใ‚’ใ‚ทใƒฉใƒใ‚นใงๆตใ™ใ‚ˆใ†ใซใ—ใŸ\r\n - 3ใคใฎใ‚ณใƒผใ‚นใ€Œใƒ‡ใƒผใ‚ฟใฎๆ‰ฑใ„ๆ–นใ€ใ€Œใƒ‡ใƒผใ‚ฟใฎ่ชญใฟๆ–นใ€ใ€Œใƒ‡ใƒผใ‚ฟใฎๆดปใ‹ใ—ๆ–นใ€ใงๅฎŸๆ–ฝไธญ\r\n- ไบบๆ่‚ฒๆˆ๏ผๅธ‚ๆฐ‘ๅ”ๅƒใƒป็”ฃๅญฆๅฎ˜้€ฃๆบ\r\n - ใƒใƒซใ‚ปใƒญใƒŠๅธ‚ใจใฎๅ›ฝ้š›้€ฃๆบใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใƒป่ฆ–ๅฏŸใƒ„ใ‚ขใƒผใฎ้–‹ๅ‚ฌ\r\n - ใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒกใƒณใƒˆใฎๅˆ†้‡Žใงๆฌกไปฃใ‚’ๅˆ‡ใ‚Š้–‹ใๆ–ฐใŸใชๆ‰่ƒฝ็™บๆŽ˜\r\n - ็ฅžๆˆธๅธ‚ใฎใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๆดป็”จไบ‹ไพ‹ใƒปไฝœๅ“ๅ‰ตๅ‡บ\r\n\r\n\r\n## ่€ƒๅฏŸ๏ผ‘๏ผˆๅ–ใ‚Š็ต„ใฟใซใคใ„ใฆใฎๆ•ด็†๏ผ‰\r\n\r\nใƒใƒซใ‚ปใƒญใƒŠๅธ‚\r\n\r\n- ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฏใ€Œๅธ‚ๆฐ‘ใฎๆŒใก็‰ฉใ ใฃใŸใƒ‡ใƒผใ‚ฟใ‚’ใฟใ‚“ใชใซ่ฟ”ใ—ใฆใ„ใใ“ใจใ€\r\n- ใ€Œๅธ‚ๆฐ‘ใ‚ตใƒผใƒ“ใ‚นใซ้‚„ๅ…ƒใ™ใ‚‹ใŸใ‚ใƒ‡ใƒผใ‚ฟ่“„็ฉใจๆ•ด็†ใ‚’่กŒใ†ใ€ใจใ„ใ†ใ€ๅธ‚ใฎๆ˜Ž็ขบใชใƒ“ใ‚ธใƒงใƒณใซๅŸบใฅใใ‚‚ใฎ\r\n- ใใฎๅปถ้•ท็ทšไธŠใซใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๆ”ฟ็ญ–ใŒใ‚ใ‚‹็‚นใงๅˆ็†็š„\r\n\r\n็ฅžๆˆธๅธ‚๏ผˆ2016๏ฝž๏ผ‰\r\n\r\n- ๅบๅ†…ๅค–ใงITไบบๆใ‚’็™บๆŽ˜ใƒป่‚ฒๆˆใซๆณจๅŠ›\r\n- ๆˆๅŠŸไบ‹ไพ‹ใƒปใ‚ญใƒผใƒ‘ใƒผใ‚ฝใƒณๅ‰ตๅ‡บใ‚’็›ฎๆŒ‡ใ—ใฆใ„ใ‚‹ใ€‚\r\n- 2020ๅนดใ‚’ใ‚ใฉใซใ€ๅธ‚ๆฐ‘ใซใ‚ˆใ‚‹่‡ชๅพ‹็š„ใชICTใ‚’ๆดป็”จใ—ใŸๅœฐๅŸŸ่ชฒ้กŒ่งฃๆฑบ\r\n\r\n\r\n## 
่€ƒๅฏŸ๏ผ’\r\n\r\nใ€Œใƒ‡ใƒผใ‚ฟๆดป็”จใ‚’ๅฎŸ่ฃ…ใ€ใ—ใฆใ„ใ‚‹ใจใฏใฉใ‚“ใช็Šถๆณใ‹๏ผŸ\r\n่ทๅ“กใŒๅบๅ†…ใ€ๅธ‚ๆฐ‘ใ€ไป–ใฎ็ต„็น”ใซๅฏพใ™ใ‚‹่ชฌๅพ—ๆๆ–™ใ‚„ๅŠนๆžœๆคœ่จผ็ญ‰ใซใƒ‡ใƒผใ‚ฟใ‚’ๆดป็”จใงใใ‚‹ๅธ‚ๅฝนๆ‰€\r\nex: ๆ”ฟ็ญ–็ซ‹ๆกˆใซใŠใ‘ใ‚‹่ชฒ้กŒๆŠฝๅ‡บใ€ไบˆ็ฎ—่ฆๆฑ‚ๆ™‚ใฎ่ฒป็”จๅŠนๆžœไบˆๆธฌใ€ๅฎŸๆ–ฝๅพŒใฎๅŠนๆžœๆคœ่จผใƒปๆœ€้ฉๅŒ–\r\n\r\nๆ—ฅๆœฌใฎๅˆถๅบฆใ€ไป•็ต„ใฟใซๆฒฟใฃใŸใƒ‡ใƒผใ‚ฟๆดป็”จใฎๅœจใ‚Šๆ–นใ‚’่€ƒใˆใ‚‹ๅฟ…่ฆใŒใ‚ใ‚‹\r\n๏ผˆใƒใƒซใ‚ปใƒญใƒŠใจๅŒใ˜ใ‚ˆใ†ใชไบบไบ‹ๅˆถๅบฆใ‚’ๅฎŸๆ–ฝใ™ใ‚‹ใ“ใจใฏ้›ฃใ—ใ„๏ผ‰\r\n\r\n1. ใƒ‡ใƒผใ‚ฟๆดป็”จใฎๆ„็พฉใƒปๆœ‰ๅŠนๆ€งใซใคใ„ใฆ่ทๅ“กใซ็†่งฃใ—ใฆใ‚‚ใ‚‰ใ†\r\n2. ใƒ‡ใƒผใ‚ฟๅˆ†ๆžใ‚’ๆต้€šใ•ใ›ใ‚‹๏ผˆไป–ใฎๆฅญๅ‹™ใธใฎๆต็”จ๏ผ‰\r\n3. ใƒ‡ใƒผใ‚ฟๅˆ†ๆžใ‚’่กŒใ†้š›ใฎ็Ÿฅ่ญ˜ใ‚„ๆ„Ÿๆ€งใ‚’็ฃจใ„ใฆใ‚‚ใ‚‰ใ†๏ผˆ่ค‡ๅˆ็š„ใชใƒ‡ใƒผใ‚ฟๅˆ†ๆž๏ผ‰\r\n\r\n\r\n\r\n# ใŸใ‚ใพใฃใทร—ใ‚ณใƒผใƒ—ใ“ใ†ใน\r\n\r\nๆธ…ๆฐดใ•ใ‚“ / ใŸใ‚ใพๆ ชๅผไผš็คพ\r\n\r\nๅ…จๅ›ฝใฎๅœฐๅŸŸๅ‰ตๆˆใƒปไฝใฟใ‚ˆใ•ๅ‘ไธŠใ‚’็›ฎๆŒ‡ใ™\r\nไฝๆฐ‘ไธปไฝ“ๅž‹\r\n\r\nไผš็คพๆฆ‚่ฆ๏ผšIT ใจ็พๅ ดใฎๅคšๆง˜ๆ€งใƒป็”ฃๅญฆๅฎ˜ใฎ้€ฃๆบใซๅผทใฟ\r\n\r\nๅœฐๅŸŸๆดปๅ‹•ใซๆ‰‹ใ‚’ไผธใฐใ—ๅง‹ใ‚ใ‚‹ใจใ€่‰ฏใ„ใ‚‚ใฎใŒๆฒขๅฑฑใ‚ใ‚‹ใฎใซใ€ๅฎŸใฏ็ด™ๅช’ไฝ“ใงใ—ใ‹้…ใ‚‰ใ‚Œใฆใ„ใชใ„ใ€‚SNS ใ‚‚ๅฎŸใฏใ‚ฟใ‚คใƒ ใƒชใƒผใซๆƒ…ๅ ฑใ‚’ๅฑŠใ‘ใ‚‰ใ‚Œใฆใ„ใชใ„ใ€‚\r\n\r\nใ€ŒใคใชใŒใ‚Šใฎๅผทใ•ใ€\r\n\r\nๆœ€ๅฐใฎๅŠดๅŠ›ใงๆœ€ๅคงใฎๅŠนๆžœใฎๆŠ•็จฟ้–ฒ่ฆงๆ–นๅผ\r\n\r\nๆœชๆฅใฎๅœฐๅŸŸๆดปๅ‹•๏ผˆใ‚คใƒ™ใƒณใƒˆ๏ผ‰ใฎๆœ€้ฉใชๆฉŸ่ƒฝ็พค\r\nๅ…ฌๆฐ‘้คจๅ‹คๅŠด้’ๅนดไผš้คจ\r\nๅ…็ซฅๆ–ฝ่จญ\r\n่‚ฒๅ…ใจ่‡ชๆฒปไผš\r\nๆทฑๅˆปใช้Ž็–Ž้ซ˜้ฝขๅŒ–\r\n\r\n\r\n## ๅฎŸ่จผ็ตๆžœใงๅๅฟœ็އ 5% ใŒๅœฐๅŸŸ็คพไผšใจ็ตๅˆใ—ใŸ\r\n\r\nใ€Œ็ŸฅใฃใฆใŸใ‚‰่กŒใฃใŸใฎใซใ€ใ‚ฟใ‚คใƒŸใƒณใ‚ฐ โ†’ 5% \r\nใƒใƒฉใ‚ทใฎๅ ดๅˆใฏ 0.01 - 0.2 % ใŒๆจ™ๆบ–็š„\r\n\r\nๅ…จๅ›ฝๅ…ฑ้€šใฎใƒ—ใƒฉใƒƒใƒˆใƒ›ใƒผใƒ \r\n\r\n- ใ‚ปใƒ–ใƒณ้Š€่กŒ\r\n- ็ฅžๆˆธๅธ‚๏ผˆใ‚ณใƒผใƒ—ใ“ใ†ใน๏ผ‰\r\n- ๆฑไบฌๆตทไธŠๆ—ฅๅ‹•็ซ็ฝไฟ้™บๆ ชๅผไผš็คพ\r\n\r\nใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ใ—ใฆไฝฟใฃใฆใฟใพใ—ใ‚‡ใ†\r\n\r\n\r\n# ใ‚ขใ‚ธใ‚ขใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใƒใƒƒใ‚ซใ‚ฝใƒณๅ‘Š็Ÿฅ\r\n\r\nใจใ‚Šใ‚„ใพ 
ใฟใ‚†ใ ใ•ใ‚“\r\n\r\n4 ใ‚ซๅ›ฝใฎใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚’ไฝฟใฃใฆใ€ๅ…ฑ้€š่ชฒ้กŒใฎ่งฃๆฑบใ‚’็›ฎๆŒ‡ใ™ใƒใƒƒใ‚ซใ‚ฝใƒณ\r\n\r\nๆ—ฅๆœฌใจๅฐๆนพใจใฎ้–“ใงไปŠๅนดใฎ 5 ๆœˆใซ่ฆšๆ›ธใŒ็ตใฐใ‚Œใพใ—ใŸ\r\n<https://headlines.yahoo.co.jp/hl?a=20170512-00000005-ftaiwan-cn>\r\n\r\n7/22 ๅ›ฝ้š›ใ‚คใƒณใƒ—ใƒƒใƒˆใ‚ปใƒŸใƒŠใƒผ๏ผˆๆฑไบฌใƒปๅคง้˜ช๏ผ‰\r\n\r\nๅคง้˜ชไผšๅ ด ใƒใƒซใ‚ซใ‚น\r\nๅคง้˜ชไผšๅ ดใฎใŸใ‚ใซ็‹ฌ่‡ชใฎๆฐ—่ฑกๆƒ…ๅ ฑใŒ้…ๅธƒใ•ใ‚Œใ‚‹\r\n8/11 ใƒ‡ใƒขๅฑ•็คบใƒปใ‚ฐใƒฉใƒณใƒ•ใƒญใƒณใƒˆ ACTIVE lab ใฎไผšๅ ดใงๅฑ•็คบใ•ใ‚Œใ‚‹\r\n\r\n<https://hackcamp.doorkeeper.jp/events/62539>\r\n\r\n\r\n\r\n# ็คพไผš็š„่ชฒ้กŒ่งฃๆฑบๅž‹ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ๅ‹Ÿ้›†\r\n\r\nๆพๆ‘ ไบฎๅนณ ใ•ใ‚“ / ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒชใƒณใ‚ฏ\r\n\r\n\r\nStartup in Residence\r\n\r\n- ใ‚ตใƒณใƒ•ใƒฉใƒณใ‚ทใ‚นใ‚ณใงๅง‹ใพใฃใŸใ€ๅœฐๅŸŸใƒป็คพไผš่ชฒ้กŒ่งฃๆฑบๅž‹ใฎใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใฎไผๆฅญๆ”ฏๆด\r\n- ไฝใฟ่พผใฟใฎใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—๏ผˆArtists in Residence ใจๅŒใ˜ใ‚ˆใ†ใชใƒŽใƒช๏ผ‰\r\n- ใ‚ตใƒณใƒ•ใƒฉใƒณใ‚ทใ‚นใ‚ณใงใฏใ€Civic Tech ใ‚„ Gov Tech ใฎใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใฎ็™ป็ซœ้–€ใฎใ‚ˆใ†ใชใ‚‚ใฎ\r\n\r\nStartup In Residence @ KOBE\r\n\r\n- 9-10ๆœˆไธญๆ—ฌ ๆ›ธ้กžๅ‹Ÿ้›† : CfJ Summit ใงใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใ‚ฝใƒณใƒปใƒใƒผใƒ ใƒ“ใƒซใƒ‡ใ‚ฃใƒณใ‚ฐ\r\n- 11ๆœˆ ๅ†…้ƒจใƒ—ใƒฌใ‚ผใƒณ\r\n- 12-3 ๆœˆ ๆ”ฏๆดใƒ—ใƒญใ‚ฐใƒฉใƒ \r\n- 4 ๆœˆๅฎŸ่จผๅฎŸ้จ“\r\n\r\nๅคงๆ‰‹SIerใงใฏใชใ„ไผๆฅญใƒปๅ€‹ไบบใฎใŸใ‚ใฎไป–่‡ชๆฒปไฝ“ใธใฎๅฐŽๅ…ฅใ€ๅฎŸ็ธพไฝœใ‚Š\r\nใ€Œ้ญ‚ใŒๅ…ฅใฃใฆใ„ใชใ„ใ‚ทใ‚นใƒ†ใƒ ใ€ใŒๅฐŽๅ…ฅใ•ใ‚Œใ‚‹ไธ€ๆ–นใงใ€ๅ‚ๅ…ฅ้šœๅฃใ‚‚้ซ˜ใ„ใ€ใจใ„ใ†ใฎใ‚’ไน—ใ‚Š่ถŠใˆใ‚‰ใ‚Œใ‚‹ใ‚ˆใ†ใชไป•ๆŽ›ใ‘ใซใ—ใŸใ„ใ€‚\r\n\r\n\r\n\r\n# ใ‚ตใƒŸใƒƒใƒˆๅ†…Code for Kobeใ‚ปใƒƒใ‚ทใƒงใƒณใƒปใƒ–ใƒผใ‚นๆคœ่จŽ\r\n\r\n่ฅฟ่ฐทใ•ใ‚“\r\n๏ผˆใกใ‚‡ใฃใจ่ญฐ่ซ–ใŒๆทท็ทšๆฐ—ๅ‘ณใ ใฃใŸใฎใงใ€ๅทฎใ—ๅผ•ใ„ใฆใƒญใ‚ฐใ‚’็œบใ‚ใฆใใ 
ใ•ใ„โ€ฆ๏ผ‰\r\n\r\nใ‚ปใƒƒใ‚ทใƒงใƒณ\r\nใƒปใ‚ทใƒ‹ใ‚ขๅ‘ใ‘ใ‚นใƒžใƒ›ๆ•™ๅฎค\r\nใƒปๅญไพ›ใŒไฝœใ‚‹้ซ˜้ฝข่€…ๅ‘ใ‘ใƒžใƒ‹ใƒฅใ‚ขใƒซ\r\nใ€€-ๅ‹•็”ปใ‚’ไฝœๆˆใ—ใฆใฏใฉใ†ใ‹โ‡จWEBใซใ‚ขใƒƒใƒ—\r\nใƒปๅค–ๅ›ฝไบบใซ็ฅžๆˆธใฎ่ฆณๅ…‰ใฎใ—ใซใใ•๏ผˆใ‚ขใ‚ฏใ‚ปใ‚ทใƒ“ใƒชใƒ†ใ‚ฃไธ่ถณ๏ผ‰ใ‚’ๆ•™ใˆใฆใ‚‚ใ‚‰ใ†ใ€‚\r\nใƒปCfKใฎใƒกใƒณใƒใƒผใŒๅ–ใ‚Š็ต„ใฟใŸใ„ใ‘ใฉๅ›ฐใฃใฆใฆใ„ใ‚‹ใ“ใจใซใคใ„ใฆๅŠฉใ‘ใ‚’ๆฑ‚ใ‚ใ‚‹ใ€‚ไปฒ้–“ใ‚’้›†ใ†ใ€‚\r\nใƒปใ€Œใ‚ณใƒฏใ‚ฏใƒŠใ‚คใƒจ Code for Kobeใ€ใ€‚ใƒŸใƒผใƒ†ใ‚ฃใƒณใ‚ฐใซๅ‚ๅŠ ใ™ใ‚‹ใใฃใ‹ใ‘ใ‚„ใ€ๅ‚ๅŠ ๅ‰ใฎๅฐ่ฑกใจใ€ๅ‚ๅŠ ๅพŒใฎๅฐ่ฑกใ‚’ LT ๅฝขๅผใงใƒชใƒฌใƒผใง่ฉฑใ—ใฆใฟใ‚‹ใชใฉใ€‚ใ‚ขใƒณใ‚ฑใƒผใƒˆใ‚’ๆ’ฎใฃใฆใƒ‡ใƒผใ‚ฟใง่ฆ‹ใ›ใ‚‹ใ€‚\r\nโ‡จใ€Œใฒใจใ‚Šใงใ‚‚ใ„ใ„ใ‹ใ‚‰่ชฐใ‹ใจ็นซใŒใ‚‹ใ€ใซใ—ใพใ—ใ‚‡ใ†๏ผ๏ผˆๅ†…ๅฎนๆœชๅฎš๏ผ‰\r\n\r\nใƒ–ใƒผใ‚น\r\nใƒปๅ„่‡ชใฎๆŒใฃใฆใ‚‹ใ“ใ‚Œใพใงใฎๅ†™็œŸโ‡จใ„ใ„ๅ†™็œŸใซใฏใ„ใ„ใญใจใคใ‘ใ‚‹ / ๅฝ“ๆ—ฅๅ„่‡ชใŒๆ’ฎๅฝฑใ—ใŸๅ†™็œŸใ‚’้›†ใ‚ใ‚‰ใ‚Œใ‚‹ใ‚ตใ‚คใƒˆใ‚’ไฝœใฃใฆใ€ๅœฐๅ›ณไธŠใซ้›†ใ‚ใฆ่กŒใ‘ใ‚‹ใ‚ˆใ†ใชไป•ๆŽ›ใ‘ใ‚’ไฝœใ‚‹ใจ้ข็™ฝใ„ใ‹ใช\r\nใƒปๆ™ฎ้€šใซใ‚„ใ‚‹ใจ่ชฐใจใ‚‚่ฉฑใ•ใšใซใ‚ปใƒƒใ‚ทใƒงใƒณใ‚’ใŸใ ่žใ„ใฆๅธฐใฃใฆใ„ใใ‚ˆใ†ใชๆฅ่จช่€…ใŒใ„ใ‚‹ใ ใ‚ใ†ใ€‚ใใฎ้šœๅฎณใ‚’ๅ–ใ‚Š้™คใใ‚ˆใ†ใชไป•ๆŽ›ใ‘ใ€‚ใ€Œ่ชฐใ‹ใจ็นซใŒใ‚‹ใ€ใŸใ‚ใซ่‡ช็”ฑใซใ‚ฐใƒใชใฉใ‚’ๅผตใ‚Šๅ‡บใ—ใฆ็นซใŒใ‚‹ใใฃใ‹ใ‘ใซใ™ใ‚‹ใ€‚ไผšๅ ดใฎใƒ‡ใ‚ฃใ‚นใƒ—ใƒฌใ‚คใซใƒชใ‚ขใƒซใ‚ฟใ‚คใƒ ใงๅ†™ใ™ใ€‚ไปฒ่‰ฏใใชใฃใŸใฒใจใจๅ†™็œŸใ‚’ๆ’ฎใฃใฆใ‚ขใƒƒใƒ—ใ—ใฆใ„ใใ€‚ใ‚คใƒ™ใƒณใƒˆๅฐ‚็”จใƒžใ‚นใƒˆใƒ‰ใƒณใฎใ‚คใƒณใ‚นใ‚ฟใƒณใ‚นใ‚’ไฝœใ‚‹ใ€‚\r\nใƒป้ก”ๅ‡บใ—ใƒ‘ใƒใƒซ๏ผˆไพ‹๏ผš้–ขใ•ใ‚“ใ€Œใƒใƒƒใ‚ฏใ—ใพใ™ใ€ใ‚„ใ€ไนพๆฏๅงฟใชใฉ๏ผ‰\r\nโ‡จใ€Œๅ†™็œŸใ€ใซใ—ใพใ—ใ‚‡ใ†๏ผ\r\n\r\nใ‚ญใƒƒใ‚บ\r\nใƒป้ซ˜ๆ ก็”Ÿๅ‘ใ‘ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๆดป็”จ\r\nใƒปใƒžใ‚คใ‚ฏใƒฉใ‚’ไฝฟใฃใฆ็คพไผš็š„ใชใ‚„ใค\r\nใƒปๅนธใ›ใฎๆ‘ใซๅฎถๆ—้€ฃใ‚Œใฏใฉใฎใใ‚‰ใ„ใใ‚‹ใฎใ‹๏ผŸ\r\n\r\n\r\n\r\n# Code for Japan SUMMIT 2017 ๅฝ“ๆ—ฅ้‹ๅ–ถใ‚นใ‚ฟใƒƒใƒ•ๅ‹Ÿ้›†\r\n\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\ngoogle docs ใฎใƒ•ใ‚ฉใƒผใƒ 
ใŒใงใใฆใ„ใ‚‹ใฎใงใ€ๅฟœๅ‹Ÿใ‚’ใ‚ˆใ‚ใ—ใใŠ้ก˜ใ„ใ—ใพใ™๏ผ\r\nใ‚นใ‚ฟใƒƒใƒ•Tใ‚ทใƒฃใƒ„ใจใ‹ใ‚ใ‚Šใพใ™ใ€‚\r\n<https://docs.google.com/forms/d/e/1FAIpQLSejw1vZ50Z8xilB9j0Cswl7wvi7oVP0X59vQT2GD0UmZpMDUg/viewform>\r\n\r\n\r\n" }, { "alpha_fraction": 0.6682620644569397, "alphanum_fraction": 0.736464262008667, "avg_line_length": 19.99342155456543, "blob_id": "84045404089b671ffae2f68e7c70d13d6c4a5c15", "content_id": "964ea087ee037ad4e242cbe36d6fa5355319f0bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6213, "license_type": "no_license", "max_line_length": 162, "num_lines": 152, "path": "/_posts/2016-08-18-meeting19.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: ็ฌฌ19ๅ›žCode for Kobeๅฎšไพ‹ไผš\r\ndate: 2016-08-18 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n Google for Non Profits(ๅทไบ•):15ๅˆ†\r\n ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใƒป็ฅžๆˆธใฎๆŽจ้€ฒ(้ซ˜ๆงป):20ๅˆ†\r\n ็ฅžๆˆธๅธ‚ใ‚ฏใƒชใ‚จใ‚คใƒ†ใ‚ฃใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใ‚ฟใƒผใฎใŠไป•ไบ‹(ๅฑฑ้˜ช):40ๅˆ†\r\n ใƒใ‚ฑใƒขใƒณGoใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ(้•ทไบ•):30ๅˆ†\r\n ใ‚ณใƒผใƒ—ใ“ใ†ในใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณๅ‹Ÿ้›†(่ฅฟ่ฐท):5ๅˆ†\r\n ใใฎไป–้ฃ›ใณๅ…ฅใ‚Šๆญ“่ฟŽ๏ผ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/148660975564191/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-19th-meeting--AdqS7wQNgKPHYyEz4ra5_CKhAQ-vHglCmUdjrlMqNq3G1fVm)\r\n/ Links -\r\n\r\n\r\nไบˆๅฎš\r\n\r\n- Google for Non Profits(ๅทไบ•):15ๅˆ†\r\n- ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚- ็ฅžๆˆธใฎๆŽจ้€ฒ(้ซ˜ๆงป):20ๅˆ†\r\n- ็ฅžๆˆธๅธ‚ใ‚ฏใƒชใ‚จใ‚คใƒ†ใ‚ฃใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใ‚ฟใƒผใฎใŠไป•ไบ‹(ๅฑฑ้˜ช):40ๅˆ†\r\n- ใƒใ‚ฑใƒขใƒณGoใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ(้•ทไบ•):30ๅˆ†\r\n- ใ‚ณใƒผใƒ—ใ“ใ†ในใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณๅ‹Ÿ้›†(่ฅฟ่ฐท):5ๅˆ†\r\n- ใใฎไป–้ฃ›ใณๅ…ฅใ‚Šๆญ“่ฟŽ๏ผ\r\n- โ€ป้›†ๅˆๅ†™็œŸๆ’ฎใ‚Šใพใ™\r\n\r\n# Google for Non Profits\r\n(ๅทไบ•)\r\n\r\n<script async 
class=\"speakerdeck-embed\" data-id=\"2391f13ae2a64f55958434dbb7124d11\" data-ratio=\"1.33333333333333\" src=\"//speakerdeck.com/assets/embed.js\"></script>\r\n\r\nNPOๅ‘ใ‘ใงGoogleใฎใƒ—ใƒญใƒ€ใ‚ฏใƒˆใŒ็„กๆ–™ใงไฝฟใˆใ‚‹ใƒ—ใƒญใ‚ฐใƒฉใƒ \r\n\r\n- ๅ‚ๅŠ NPO๏ผš68ๅ›ฃไฝ“๏ผˆ่ฟ‘็•ฟๅœใŒๅŠๅˆ†ใใ‚‰ใ„๏ผ‰\r\n- ่ชฒ้กŒ๏ผšใ‚ใพใ‚Šๆ™‚้–“ใŒ็„กใ„ไธญใ€GoogleๅดใŒๅ…จใฆใฎใ‚ตใƒผใƒ“ใ‚นใ‚’่ชฌๆ˜Žใ—ใŸใฎใงใ€ใชใ‹ใชใ‹ใƒชใƒ†ใƒฉใ‚ทใƒผใŒ็„กใ„ๆ–นใ‚‚ๅคšใ‹ใฃใŸใฎใงใ€ไธๆบ€ใ‚‚ใ‚ใฃใŸใ€‚\r\n\r\n- Q NPOๅดใฏใฉใ‚Œใใ‚‰ใ„ใฎใƒชใƒ†ใƒฉใ‚ทใƒผใ‚’ๆŒใฃใŸๆ–นใชใฎใ‹๏ผŸ\r\n- Q ๆœฌๅฝ“ใซๆง˜ใ€…ใ€‚\r\n\r\n# ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใƒป็ฅžๆˆธใฎๆŽจ้€ฒ\r\n(้ซ˜ๆงป)\r\n\r\nใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใฎ๏ผ“ใคใฎๅŸบๆœฌๆ–น้‡\r\n\r\n- ใพใกใฎใƒ‡ใ‚ถใ‚คใƒณ\r\n- ใใ‚‰ใ—ใฎใƒ‡ใ‚ถใ‚คใƒณ\r\n- ใ‚‚ใฎใฅใใ‚Šใฎใƒ‡ใ‚ถใ‚คใƒณ\r\n\r\n[UNESCO City of Design Network](http://en.unesco.org/creative-cities/node/97)\r\n็ฅžๆˆธใฎไป–ใซ20ๆ•ฐ้ƒฝๅธ‚ใ€‚ๆ—ฅๆœฌใ ใจๅๅคๅฑ‹ใชใฉใ€‚\r\n\r\n## KIITO\r\n<http://kiito.jp/> ใ‚‚ใจใ‚‚ใจ็”Ÿ็ณธๅทฅๅ ดใ ใฃใŸใ€‚ใชใฎใงKIITOใ€‚\r\nไผš่ญฐๅฎคใฎๅ€คๆฎตใ‚‚ๆฏ”่ผƒ็š„ใŠๆ‰‹่ปฝใชใฎใงใ€ๆ˜ฏ้žไฝฟใฃใฆๆฌฒใ—ใ„ใจใฎใ“ใจใ€‚\r\n\r\n๏ผ”ใคใฎๆดปๅ‹•ๆ–น้‡\r\n\r\n- CREATIVEใฎๅฎŸ่ทตใฎๅ ดใ‚’ใคใใ‚‹\r\n- CREATIVEใฎๆ‹…ใ„ๆ‰‹ใ‚’ใคใใ‚‹\r\n- CREATIVEใชไบคๆตใฎๅ ดใ‚’ใคใใ‚‹\r\n- CREATEVEใฎๆƒ…ๅ ฑใฎ็™บไฟกใจใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใ‚’ๅบƒใ’ใ‚‹\r\n\r\nใ€Œใกใณใฃใ“ใ†ในใ€ใ‚คใƒ™ใƒณใƒˆ <http://kiito.jp/chibikkobe/>\r\n\r\n## ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚็ฅžๆˆธๆŽจ้€ฒไผš่ญฐ\r\nๅฎŸ็พใ—ใŸใ“ใจ\r\n\r\n- ๆฑ้Šๅœ’ๅœฐใซ่Š็”Ÿใ‚’ใฒใ„ใฆใ‚ซใƒ•ใ‚งใ‚’่จญ็ฝฎใ—ใŸใ‚Šใ—ใฆใ„ใ‚‹ใ€‚ใƒจใ‚ฌใ‚„่ชญๆ›ธไผšใชใฉใ€ๅธ‚ๆฐ‘ใŒไธปไฝ“็š„ใชๅ–ใ‚Š็ต„ใฟใŒใงใใ‚‹ใ‚ˆใ†ใซใ€‚\r\n- <http://www.city.kobe.lg.jp/information/press/2016/07/20160713300202.html>\r\n- ็ฅžๆˆธๅธ‚ๅบ่ˆŽใฎๅ†…้ƒจใƒ‡ใ‚ถใ‚คใƒณ\r\n\r\n## ็ฌฌ2ๅ›žใ€Œใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใƒป็ฅžๆˆธใ€ๅ‰ต้€ ไผš่ญฐ KOBE ACTIVE CREATORS 
MEETING\r\n<http://www.city.kobe.lg.jp/information/project/design/sozokaigi/280716.html>\r\n\r\n็ตๆžœใฏโ†‘ใฎใƒฌใƒใƒผใƒˆใซๆŽฒ่ผ‰ใ•ใ‚Œใฆใ„ใ‚‹ใ€‚\r\n\r\n- A-1 ็ฅžๆˆธใŒ็™บไฟกใ™ใ‚‹ใƒฉใ‚คใƒ–ๆ˜ ๅƒใ‚’ใ‚‚ใฃใจๅข—ใ‚„ใ™ใžใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ\r\n- A-2 ็ฅžๆˆธ๏ผˆๅœฐๅ…ƒ๏ผ‰๏ผˆใ‚†ใ‹ใ‚Š๏ผ‰ใฎๆ–นใซ่ฆณใฆใ‚‚ใ‚‰ใ†LIVEใ‚ณใƒณใƒ†ใƒณใƒ„ใ‚’ๅ‰ตใ‚‹\r\n- B-3 KOBEใƒ—ใƒฌใ‚คใ‚นใƒกใ‚คใ‚ญใƒณใ‚ฐใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ\r\n- B-4 ็ฉบใใƒใ‚„๏ฝžAirBnb\r\n- C-5,6 ไธ–็•Œไธ€่‡ช่ปข่ปŠLOVEใชใพใกKOBE\r\n- D-7 ๅ‰ต้€ ็š„ไบบๆใƒป่ตทๆฅญๅฎถใ‚’่‚ฒๆˆใ™ใ‚‹\r\n- E-8 KIITOใ‚’โ—‹โ—‹ใฎโ€˜ใ‚ดใƒŸ็ฎฑโ€™ใซใ™ใ‚‹๏ผ\r\n- E-9 ้ƒฝๅธ‚ใƒ‡ใ‚ถใ‚คใƒณ\r\n- F-10 ไบบใซๅฏ„ใ‚ŠๆทปใฃใŸ็คพไผšใ‚’ๅฎŸ็พใ™ใ‚‹ใŸใ‚ใฎ็”ฃๅญฆๅฎ˜ๅฎŸ่ทตใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ\r\n- F-11 ็คพไผš่ชฒ้กŒใ‚’็”ฃๅฎ˜ๅญฆใง่งฃๆฑบ๏ผˆ่พฒๆฅญ๏ผ‰\r\n- G-12,13 ใƒฏใ‚ซใƒขใƒŽใฎ้ƒฝๅธ‚\r\n\r\n# ็ฅžๆˆธๅธ‚ใ‚ฏใƒชใ‚จใ‚คใƒ†ใ‚ฃใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใ‚ฟใƒผใฎใŠไป•ไบ‹\r\n(ๅฑฑ้˜ช)\r\n\r\n<http://www.city.kobe.lg.jp/information/press/2015/06/20150601041801.html>\r\nไปปๆœŸใฏ 3 ๅนดใ€‚\r\nๅฑฑ้˜ชใ•ใ‚“ใฏๆฑไบฌใซไฝใ‚“ใงใ„ใ‚‹ใŒใ€้€ฑ๏ผ”ใง็ฅžๆˆธใซๆฅใฆใ„ใ‚‹ใ€‚\r\n\r\nไปŠ่ฆ‹ใˆใฆใใŸๅคงใใช่ฆ็ด ใจใ—ใฆใฏๆฌกใฎ3็‚น\r\n\r\n- ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ๆ”ฟ็ญ–ใฎๆŽจ้€ฒใจๅบƒๅ ฑ\r\n- ๆ”ฟ็ญ–ใƒป่กŒๆ”ฟใ‚ตใƒผใƒ“ใ‚นใธใฎใƒ‡ใ‚ถใ‚คใƒณๆ€่€ƒใฎๆตธ้€\r\n- ๅบƒๅ ฑๅˆถไฝœ็‰ฉใฎๅ“่ณชๅ‘ไธŠ\r\n\r\nไพ‹ใˆใฐใ€Œใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใฎ้“่ทฏใจใฏไฝ•ใ ใ‚ใ†๏ผŸใ€ใฎใ‚ˆใ†ใช่ฉฑใ€‚\r\nใ€Œใ•ใ™ใŒใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใƒป็ฅžๆˆธ๏ผใ€ใจ่จ€ใฃใฆใ‚‚ใ‚‰ใˆใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใฎใŒ็›ฎๆจ™ใ€‚\r\n\r\nๅ…ทไฝ“็š„ใชๆดปๅ‹•\r\n\r\n- ๅ…จๅบใ‹ใ‚‰ใฎ็›ธ่ซ‡ใ‚’ๅ—ใ‘ใ‚‹\r\n- ไผ็”ป็ซถไบ‰ๅ…ฅๆœญใฎๅฏฉๆŸปๅ“ก\r\n- ๅบƒๅ ฑๅˆถไฝœ็‰ฉใฎ็›ฃไฟฎใƒปใƒ‡ใ‚ฃใƒฌใ‚ฏใ‚ทใƒงใƒณ๏ผˆ๏ผ†ๅˆถไฝœ๏ผ‰\r\n- ่ทๅ“ก็ ”ไฟฎ\r\n- ใƒ‡ใ‚ถใ‚คใƒณ้ƒฝๅธ‚ใฎๅบƒๅ 
ฑ\r\n\r\nๆ‚ฉใฟใ‚„่ชฒ้กŒใ‚’่žใใ€ไธ€็ท’ใซ็ญ”ใˆใ‚’ๆŽขใ‚‹๏ผˆใƒ‡ใ‚ถใ‚คใƒณ็š„ใชๆ€่€ƒ๏ผ†ๆฐ‘้–“็š„ใช็™บๆƒณ๏ผ‰ใ€‚\r\n\r\nๅๅˆบใ‚‚ใ€ๆ‰‹ๅ…ƒใฎใƒ—ใƒชใƒณใ‚ฟใงๅ‡บๅŠ›ใ—ใฆใ‚‚ๆฅญ่€…ใŒๅ‡บๅŠ›ใ—ใŸใ‚‚ใฎใ‚‚ๅŒใ˜ใซใชใ‚‹ใ‚ˆใ†ใซใ€ใ‚ใˆใฆMSใ‚ดใ‚ทใƒƒใ‚ฏใ‚’ไฝฟใฃใŸใƒ‡ใ‚ถใ‚คใƒณใซใ€‚\r\nใƒ–ใƒฉใƒณใƒ‡ใ‚ฃใƒณใ‚ฐ\r\n\r\n- ใ€Œ็ฅžๆˆธไฟๅฅๅคง่‡ฃไผšๅˆใ€\r\n- ใ€Œๅคงๅญฆ้ƒฝๅธ‚KOBEใ€\r\n\r\npurple ribbon ้‹ๅ‹•ใ€่ฑŠๅณถๅŒบDVใฎใ‚ญใƒฃใƒณใƒšใƒผใƒณใ€\r\n\r\nU30 CITY KOBE\r\n\r\n- <http://www.city.kobe.lg.jp/information/press/2016/08/20160818013002.html>\r\n- <http://www.city.kobe.lg.jp/information/public/media/u30citykobe/>\r\n\r\n# ใƒใ‚ฑใƒขใƒณGoใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\n(้•ทไบ•)\r\n\r\n[ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใƒใƒผใƒ ใฎ่จญ็ฝฎ](http://www.city.kobe.lg.jp/information/press/2016/08/20160802041801.html)\r\n\r\nFacebookใฎ้•ทไบ•ใ•ใ‚“ใฎๆŠ•็จฟใฎใ„ใ„ใญใŒใชใ‚“ใจใ€Œ500่ถ…ใ€๏ผ\r\n\r\n- Q1.ใ€Œใƒใ‚ฑใƒขใƒณGO!ใ€ใ‚’ใ‚„ใฃใฆใ„ใพใ™ใ‹๏ผŸ\r\n- A ๆ‰‹ใ‚’ๆŒ™ใ’ใŸไบบใฏๅŠๅˆ†ไปฅไธŠ๏ผˆไธญใซใฏ้ฃฝใใฆใ‚„ใ‚ใกใ‚ƒใฃใŸใจใ„ใ†ไบบใ‚‚๏ผ‰\r\n- Q2.ใ€Œใƒใ‚ฑใƒขใƒณGO!ใ€ใฏใ‚นใ‚ญ๏ผŸใ‚ญใƒฉใ‚ค๏ผŸ\r\n- A ใ‚นใ‚ญใงๆ‰‹ใ‚’ๆŒ™ใ’ใŸไบบใ‚‚ๅŠๅˆ†ไปฅไธŠใ€ใ‚ญใƒฉใ‚คใงๆ‰‹ใ‚’ๆŒ™ใ’ใŸไบบใฏใ‚ผใƒญใ€ใใฎไป–ใฏใฉใกใ‚‰ใงใ‚‚ใชใ„ใจใ„ใ†ใ“ใจ๏ผŸ\r\n- Q3.ใชใœ๏ผŸ\r\n- Q4.ใ€Œใƒใ‚ฑใƒขใƒณGO!ใ€ใฏ็คพไผšใƒป่กŒๅ‹•ใ‚’ใฉใ†ๅค‰ใˆใŸ๏ผŸ\r\n- A ใ“ใ‚Œใพใงไธ€่ˆฌ็š„ใจใชใฃใฆใ„ใชใ‹ใฃใŸARใŒไธ€ๆฐ—ใซๆตธ้€ใ—ใŸ\r\n- Q5.ใ€Œใƒใ‚ฑใƒขใƒณGO!ใ€ใฏใ“ใ‚Œใ‹ใ‚‰ใฉใ†ใชใฃใฆใ„ใใจๆ€ใ„ใพใ™ใ‹๏ผŸ\r\n- Q6.ใ€Œใƒใ‚ฑใƒขใƒณGO!ใ€ใซใฉใ†ๅฏพๅฟœใ—ใฆใ„ใในใใ‹๏ผŸ\r\n- Q7.ใ‚นใƒžใƒ›ใ‚„ใใฎใ‚ตใƒผใƒ“ใ‚นใฎ้€ฒๅŒ–๏ผˆARใƒปVRใชใฉ๏ผ‰ใฏใฉใ†ใชใฃใฆใ„ใ๏ผŸ\r\n- Q8.ใ‚นใƒžใƒ›ใ‚„ใใฎใ‚ตใƒผใƒ“ใ‚นใฎ้€ฒๅŒ–๏ผˆARใƒปVRใชใฉ๏ผ‰ใซใฉใ†ๅฏพๅฟœใ™ในใ๏ผŸ\r\n" }, { "alpha_fraction": 0.597744345664978, "alphanum_fraction": 0.6090225577354431, "avg_line_length": 26, "blob_id": "5c114892ea7e55af2dc240cd109c6a3a8e96722d", "content_id": "16eeb6dd33ae03d6d8fda4650690255acb2d060b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/_data/logs.py", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "import requests\r\nimport re\r\nimport json\r\n\r\ndef line2item(line):\r\n\tm = re.match(r'\\- \\[([^\\]]+?)]\\(([^\\)]+?)\\)', line)\r\n\tif not m:\r\n\t\treturn\r\n\t\r\n\ttitle, url = m.groups()\r\n\tif url.startswith(\"/\"):\r\n\t\turl = \"https://hackmd.io%s\" % url\r\n\treturn {\"title\": title, \"url\":url}\r\n\r\n# thanks to https://github.com/hackmdio/codimd/issues/448\r\nr = requests.get(\"https://hackmd.io/s/S1sujZKzG/download\")\r\nassert r\r\nitems = [line2item(l) for l in r.text.split(\"\\n\")]\r\njson.dump([i for i in items if i], open(\"logs.json\",\"w\"), ensure_ascii=False)\r\n" }, { "alpha_fraction": 0.6846542954444885, "alphanum_fraction": 0.753119707107544, "avg_line_length": 18.26712417602539, "blob_id": "957facdb67c39842c30b1600ad684ce468988da7", "content_id": "18ded095a37f0276b1dbf186971a01874290a518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5845, "license_type": "no_license", "max_line_length": 135, "num_lines": 146, "path": "/_posts/2018-02-15-meeting36.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš36th\r\ndate: 2018-02-15 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ใƒคใƒ•ใƒผใจใฎใƒ‡ใƒผใ‚ฟใƒ‰ใƒชใƒ–ใƒณใชไบ‹ๆฅญ้€ฃๆบ๏ผ†ๅ‘Š็Ÿฅ(้•ทไบ•)15ๅˆ†\r\n (2)่Šฆๅฑ‹ๅธ‚ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ๅ‘Š็Ÿฅ(็ญ’ไบ•)10ๅˆ†\r\n (3)ใ€Œใƒใƒฃใƒฌใƒณใ‚ธโ€ผ๏ธŽใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น 2017ใ€ๅ ฑๅ‘Šใƒ‘ใƒใƒซใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ๏ผ†ใ‚ฐใƒฉใƒฌใ‚ณใƒใ‚นใ‚ฟใƒผไฝœๆˆ(COGๅ‚ๅŠ ่€…)30ๅˆ†\r\n (4)International Open Data Day 2018ๅ‚ๅŠ ่€…ๅคงๅ‹Ÿ้›†(ๅพŒ่—คใƒปๅทไบ•ใƒป่ฅฟ่ฐท)10ๅˆ†\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- 
log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/984119391751264/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/ByXiu0zvf)\r\n/ Links:\r\n\r\n# Code for Kobe 36th\r\n\r\nITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ŒCode for Kobeใ€ใฎ็ฌฌ36ๅ›žๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผๅˆใ‚ใพใ—ใฆใฎๆ–นๅซใ‚ใฉใชใŸใงใ‚‚ๅ‚ๅŠ ๆญ“่ฟŽใงใ™๏ผ\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n(http://www.kigyoplaza-hyogo.jp/)\r\nโ€ป19ๆ™‚ไปฅ้™ๅ…ฅ้คจ็ตŒ่ทฏใŒ่ค‡้›‘ใซใชใ‚Šใพใ™๏ผ้…ๅˆปใ•ใ‚Œใ‚‹ๆ–นใฏใƒกใƒƒใ‚ปใƒผใ‚ธ็ญ‰ใงใŠ็Ÿฅใ‚‰ใ›ใใ ใ•ใ„ใ€‚\r\n\r\n2.ใŠๅ“ๆ›ธใ(ๆ•ฌ็งฐ็•ฅ)\r\nใ€œไนพๆฏใ€œ\r\n(1)ใƒคใƒ•ใƒผใจใฎใƒ‡ใƒผใ‚ฟใƒ‰ใƒชใƒ–ใƒณใชไบ‹ๆฅญ้€ฃๆบ๏ผ†ๅ‘Š็Ÿฅ(้•ทไบ•)15ๅˆ†\r\n(2)่Šฆๅฑ‹ๅธ‚ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ๅ‘Š็Ÿฅ(็ญ’ไบ•)10ๅˆ†\r\n(3)ใ€Œใƒใƒฃใƒฌใƒณใ‚ธโ€ผ๏ธŽใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น 2017ใ€ๅ ฑๅ‘Šใƒ‘ใƒใƒซใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ๏ผ†ใ‚ฐใƒฉใƒฌใ‚ณใƒใ‚นใ‚ฟใƒผไฝœๆˆ(COGๅ‚ๅŠ ่€…)30ๅˆ†\r\n(4)International Open Data Day 2018ๅ‚ๅŠ ่€…ๅคงๅ‹Ÿ้›†(ๅพŒ่—คใƒปๅทไบ•ใƒป่ฅฟ่ฐท)10ๅˆ†\r\nโ€ปใใฎไป–่ชฟๆ•ดไธญ๏ผ†ๅ‹Ÿ้›†ไธญ๏ผ้ฃ›ใณๅ…ฅใ‚Šใ‚‚ๆญ“่ฟŽ๏ผ\r\nใ€œไบคๆตใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\n1,000ๅ††(ๅญฆ็”Ÿไปฅไธ‹็„กๆ–™)\r\n\r\n\r\n## ใƒคใƒ•ใƒผใจใฎใƒ‡ใƒผใ‚ฟใƒ‰ใƒชใƒ–ใƒณใชไบ‹ๆฅญ้€ฃๆบใซใคใ„ใฆ\r\n\r\n้•ทไบ•ใ•ใ‚“\r\n\r\nใƒ‡ใƒผใ‚ฟใ‚ขใ‚ซใƒ‡ใƒŸใƒผใ‹ใ‚‰่ชฒ้กŒใ‚’ๆŽ˜ใ‚Š่ตทใ“ใ›ใ‚‹ใ‚ˆใ†ใซใชใฃใฆใใŸใ€‚\r\nYahooJใจใฎไบบๆไบคๆต๏ผšใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผใ‚ทใƒƒใƒ—ใ€ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ๆ”ฏๆดใ€ๅƒใๆ–นๆ”น้ฉ\r\n็ฅžๆˆธๅธ‚ใ‹ใ‚‰ใ‚‚ใƒคใƒ•ใƒผใซ่กŒใฃใฆใ„ใŸใ‚Šใ™ใ‚‹\r\n\r\nใƒ‡ใƒผใ‚ฟใƒ‰ใƒชใƒ–ใƒณใช่ชฒ้กŒ่งฃๆฑบ\r\nใƒ‡ใƒผใ‚ฟใƒ‰ใƒชใƒ–ใƒณใช่ชฒ้กŒใธใฎใ‚ขใƒ—ใƒญใƒผใƒใŒใงใใ‚‹ไบบๆ่‚ฒๆˆ\r\n\r\nใƒคใƒ•ใƒผใฎใƒ•ใƒฌใƒผใƒ ใƒฏใƒผใ‚ฏใฎไธญใฎไธ€ใคใจใ—ใฆ็ฅžๆˆธๅธ‚ใŒๅ…ฅใฃใŸๆ ผๅฅฝใซใชใฃใฆใ„ใ‚‹ใ€‚\r\nใ€Œใƒ‡ใƒผใ‚ฟใƒ•ใ‚ฉใƒฌใ‚นใƒˆใ€\r\n\r\nใ€Œไธ‰ๅฎฎๅ†ๆ•ดๅ‚™ใ€ใ€Œๆ•‘ๆ€ฅใ€ใฎไบŒๆœฌๆŸฑใงใฏใ˜ใ‚ใ‚‹ใ“ใจใซใชใฃใŸใ€‚\r\n- ไธ‰ๅฎฎๆ•ดๅ‚™ใฏไบบๆต่งฃๆžใŒไธญๅฟƒใ€‚\r\n- 
ๆ•‘ๆ€ฅ่ปŠ็จผๅƒ็Šถๆณใฎ็พ็Šถๅˆ†ๆžใ€็ฎก่ฝ„ๅค–ๅ‡บๅ‹•ใฎๅ‹•ๅ‘ๅˆ†ๆžใ€ๆžถ้›ปๅ‰ใฎๆคœ็ดขใฎๅ‹•ใใ€ไบบๅฃๅ‹•ๆ…‹\r\n\r\n\r\n### kobe-barcelona.net\r\nใƒใƒซใ‚ปใƒญใƒŠใ‹ใ‚‰ใฏๆฅใ‚ŒใชใใชใฃใŸ\r\n\r\nhttp://kobe-barcelona.net/#infoKOBE\r\n\r\nQ.ใƒคใƒ•ใƒผใฏ็ฅžๆˆธๅธ‚ใฎใƒ‡ใƒผใ‚ฟ่ฆ‹ใ‚Œใ‚‹ใฎ๏ผŸ\r\nA.ๅ”ๅฎšใ‚‚ใ‚ใ‚‹ใฎใงๅ€‹ไบบๆƒ…ๅ ฑใ‚’ใƒžใ‚นใ‚ญใƒณใ‚ฐใ—ใŸไธŠใงใ€‚\r\n\r\n\r\n## ่Šฆๅฑ‹ๅธ‚ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\n็ญ’ไบ•ใ•ใ‚“\r\n\r\n3/21 ้–‹ๅ‚ฌใ€Œ่Šฆๅฑ‹ใชใ‚“ใงใ‚‚ใƒ•ใ‚งใ‚นใ‚ฟใ€ๅ†…ใงใ€\r\nใ€Œไธ–ไปฃใ‚’่ถ…ใˆใฆใ€ใคใชใŒใ‚‹็คพไผšใฎไฝœใ‚Šๆ–นใ€ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใ‚’้–‹ๅ‚ฌใ—ใพใ™ใ€‚\r\nๅบƒๅ ฑๆœชๆŽฒ่ผ‰\r\n\r\nไบบ้–“ไธญๅฟƒ่จญ่จˆๆŽจ้€ฒๆฉŸๆง‹้–ข่ฅฟๆ”ฏ้ƒจ\r\n\r\n- 3-4 ใƒใƒผใƒ ใซใ‚ใ‹ใ‚Œใฆใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใ‚ฝใƒณ\r\n- ็คพไผšไฟ้šœ๏ผˆๅŒป็™‚ใƒปไป‹่ญท๏ผ‰่ฒปใฎๆŠ‘ๅˆถใ€้ซ˜้ฝข่€…\r\n\r\nๅ‚ๅŠ ่€…ๅ‹Ÿ้›†ใ—ใพใ™๏ผ\r\n\r\n### ใ‚ใ—ใ‚„ใŸใŒใ‚„ใ™\r\nๆ˜Žๆ—ฅ 2/16 ้–‹ๅ‚ฌไบˆๅฎš\r\n\r\nhttps://www.facebook.com/events/219146908628961/\r\n\r\n\r\n## COG 2017\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nhttp://park.itc.u-tokyo.ac.jp/padit/cog2017/\r\n\r\n\r\n+ ่‡ชๆฒปไฝ“ใ‹ใ‚‰่ชฒ้กŒใจ่ชฒ้กŒใฎๅˆ†ๆžใซไฝฟใˆใ‚‹ใƒ‡ใƒผใ‚ฟใ‚’ๆๅ‡บ\r\n+ ๅธ‚ๆฐ‘ใ‹ใ‚‰ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใ‚’ๅ‹Ÿ้›†\r\n+ ๅฏฉๆŸป\r\n\r\nๅŽปๅนดใฏ็ฅžๆˆธๅธ‚ใ‹ใ‚‰่ชฒ้กŒใŒๅ‡บใฆใ„ใŸใ‘ใ‚Œใฉใ‚‚ใ€ไปŠๅนดใฏ็„กใ‹ใฃใŸใ€‚ไปŠๅนดใฏไธ‰็”ฐๅธ‚ใฎ่ชฒ้กŒใซๅ–ใ‚Š็ต„ใ‚€ใ“ใจใซใ—ใŸใ€‚\r\n\r\nไธ‰็”ฐๅธ‚ใ‚’่จชๅ•ใ—ใฆใƒ’ใ‚ขใƒชใƒณใ‚ฐ\r\n- ็พ็Šถใฏๅธ‚ๆฐ‘ใ‹ใ‚‰ๅ›ฐใฃใฆใ„ใ‚‹ใจใ„ใ†ๅฃฐใฏ็‰นใซใชใ„\r\n- ใŸใ ใ—ๅฐ†ๆฅๅ›ฐใ‚‹ใงใ‚ใ‚ใ†ไธๅฎ‰ใฏใ‚ใฃใฆใ€ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒใ‚นใ‚’ๅฎŸ้จ“็š„ใซ้‹ๅ–ถใ—ใฆใฟใŸใ‚Šใ—ใฆใ„ใ‚‹\r\n\r\nๆๅ‡บใ—ใŸใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใฏ้›ฒๅ—ๅธ‚ใฎใ‚‚ใฎใ‚’ๅ‚่€ƒใซใ—ใฆๆๅ‡บใ—ใŸ\r\nใ€Œใพใ‹ใ›ใฆไผšๅ“กใ€ใจใ„ใ†ไป•็ต„ใฟ https://medium.com/@takahashi.mune/%E8%A1%8C%E6%94%BF%E3%82%BA%E9%9B%B2%E5%8D%97%E5%B8%82%E8%A6%96%E5%AF%9F-54cdb23b27f1\r\n\r\n๏ฝžๅฎšไพ‹ไผšๅพŒใซๅ’ŒๅดŽใ•ใ‚“ใ‹ใ‚‰ใ„ใŸใ 
ใ„ใŸๆ•ๆ‰\r\nๆ˜จๆ—ฅใฎๅฎšไพ‹ไผšใงใ€Œๅ…ฑๅŠฉใฏไฝๆฐ‘ใง่กŒใ†ใ‚‚ใฎใงใ€่กŒๆ”ฟใŒ้–ขไธŽใ™ใ‚‹ใ“ใจใฏ็–‘ๅ•ใ€ใจใ„ใ†็™บ่จ€ใŒใ‚ใ‚Šใ€ใ€Œ่กŒๆ”ฟใŒๅ…ฑๅŠฉใธใฎใƒˆใƒชใ‚ฌใƒผใ‚’ๅผ•ใใ“ใจใŒๅฟ…่ฆใ€ใจๅ›ž็ญ”ใ—ใพใ—ใŸใŒใ€่จ€่‘‰่ถณใ‚‰ใšใฎใจใ“ใ‚ใŒใ‚ใ‚Šใพใ—ใŸใฎใง่ฃœ่ถณใ•ใ›ใฆใใ ใ•ใ„ใ€‚\r\nใ€Œๅ…ฑๅŠฉใ€ใฎๅฏพ่ฑก็ฏ„ๅ›ฒใฏไฝๆฐ‘ใซ้™ๅฎšใ•ใ‚Œใ‚‹ใ‚‚ใฎใงใฏใชใใ€ใใฎๅŠนๆžœใ‚’ๆœ€้ฉๅŒ–ใ™ใ‚‹ใŸใ‚ใซใฏใ€Œ็”ฃๅญฆๅฎ˜ๆฐ‘ใฎๅ”ๅƒใ€ใŒๅŸบ็›คใจใ—ใฆๅญ˜ๅœจใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ใ™ใชใ‚ใกใ€ใใ‚Œใžใ‚Œใฎ็ซ‹ๅ ดใงๆ˜Ž็ขบใชๅฝนๅ‰ฒใŒๅญ˜ๅœจใ—ใ€ใ“ใ‚ŒใŒไบ’้…ฌ็š„ใชไฟก้ ผ้–ขไฟ‚ใฎไธŠใงๅฎŸ็พใ•ใ‚Œใ‚‹ในใใ‚‚ใฎใงใ‚ใ‚‹ใจ่€ƒใˆใพใ™ใ€‚\r\nไธ‹่จ˜URLใซใ‚ขใƒƒใƒ—ใ—ใŸใ€Œใ‚ทใƒชใ‚ณใƒณใƒใƒฌใƒผๅœฐๅŸŸๅ†ๆดปๆ€งๅŒ–ใ€ใซ้–ขใ™ใ‚‹ๆ‹™ๆ–‡ใ‚’ใ€ใ”ๅ‚่€ƒใ„ใŸใ ใ‘ใ‚‹ใจๅนธใ„ใงใ™ใ€‚\r\nhttps://hyocom.jp/blog/blog.php?key=286151\r\nhttps://hyocom.jp/blog/blog.php?key=286159\r\nhttps://hyocom.jp/blog/blog.php?key=286160\r\nhttps://hyocom.jp/blog/blog.php?key=286162\r\nhttps://hyocom.jp/blog/blog.php?key=286163\r\nhttps://hyocom.jp/blog/blog.php?key=286164\r\n\r\n## IODD2018 3/3\r\nCode for Kobeใ€€ใฏCode for ้ธๆŒ™ใ€€็ฅžๆˆธๅธ‚็‰ˆ\r\nWikiDateใ€€ใซใฉใ‚“ใฉใ‚“็™ป้Œฒใ—ใพใ™๏ผ\r\nไบ‹ๅ‰ใซใƒใƒณใ‚บใ‚ชใƒณใ—ใŸใ„\r\n\r\n\r\n## ็ฅžๆˆธใ‚ซใƒ•ใ‚งใƒปใ‚นใ‚คใƒผใƒ„ๆ•ฃ็ญ–ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃใƒผ ใ‚„ใฃใฆใฟใŸใ‚ˆ\r\nๆฆŽใ•ใ‚“\r\n\r\nใƒžใƒƒใƒ”ใƒณใ‚ฐใจๅˆใ‚ณใƒณใฎๅˆไฝ“็‰ˆ\r\n\r\nๅ…ƒ็”บโ†’ไธ‰ๅฎฎ ๆœ€ๅพŒใฏ plug078 ใธ\r\nhttp://www.plug078.com/\r\n\r\nMAPS.ME ใ‚’ๅˆฉ็”จ\r\nhttps://wiki.openstreetmap.org/wiki/MAPS.ME\r\n\r\nๅ‚ใƒŽไธ‹ใ•ใ‚“ใŒ้‡่ค‡ใ‚’ๅ‡ฆ็†ใ—ใฆ็™ป้Œฒ\r\n\r\n\r\n## Kobe x BRAVE\r\n่–ฌๅธซๅฏบใ•ใ‚“\r\n\r\n็ฅžๆˆธๅธ‚ ๅŒป็™‚ๆ–ฐ็”ฃๆฅญๆœฌ้ƒจ\r\n\r\n3/5 Demo day ่ด่ฌ›่€…ๅ‹Ÿ้›†ไธญ\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7148523330688477, "alphanum_fraction": 0.7545174360275269, "avg_line_length": 19.590476989746094, "blob_id": "4ca6f5af6229dd2e2ff694bb094e74f3b693ba58", "content_id": "533e30876b330d237a1977b6337b4eb6b6a5c12f", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4773, "license_type": "no_license", "max_line_length": 122, "num_lines": 105, "path": "/_posts/2017-02-16-meeting25.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš25th\r\ndate: 2017-02-16 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผๅƒ่‘‰ๅธ‚ใซ่กŒใฃใฆใฟใŸ(ๅฑฑๆœฌ)\r\n (2)ๅˆ้–‹ๅ‚ฌ๏ผ็ฅžๆˆธ็™บใ‚ฏใƒญใ‚นใƒกใƒ‡ใ‚ฃใ‚ขใ‚คใƒ™ใƒณใƒˆใ€Œ078ใ€ใซใคใ„ใฆ(้•ทไบ•ใ€่—คไบ•)\r\n (3)ใใฎไป–็พๅœจ่ชฟๆ•ดไธญ๏ผ†ๅ‹Ÿ้›†ไธญ\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1770695199916282/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-25th-meeting--AdpVScLNbt~tthmzkq2exnm4AQ-98DzOkgRypB8h1Yjgl3kp)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“ใƒฌใƒใƒผใƒˆ](http://masaki-ravens.com/main/blog/everythingispractice/?p=1245)\r\n\r\n+ (1)ใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผๅƒ่‘‰ๅธ‚ใซ่กŒใฃใฆใฟใŸ(ๅฑฑๆœฌ)\r\n+ (2)ๅˆ้–‹ๅ‚ฌ๏ผ็ฅžๆˆธ็™บใ‚ฏใƒญใ‚นใƒกใƒ‡ใ‚ฃใ‚ขใ‚คใƒ™ใƒณใƒˆใ€Œ078ใ€ใซใคใ„ใฆ(้•ทไบ•ใ€่—คไบ•)\r\n+ (3)ใใฎไป–็พๅœจ่ชฟๆ•ดไธญ๏ผ†ๅ‹Ÿ้›†ไธญ\r\n\r\n# ใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผๅƒ่‘‰ๅธ‚ใซ่กŒใฃใฆใฟใŸ\r\n\r\nๅฑฑๆœฌใ•ใ‚“๏ผ Yahoo Japan\r\n\r\nๅƒ่‘‰ๅธ‚ๅฝนๆ‰€ใฎใ‚ณใƒผใƒใƒฌใƒผใƒˆใƒ•ใ‚งใƒญใƒผ่กŒใฃใฆใใŸใ€‚ๆ™ฎๆฎตใฏๅคง้˜ชใ‚ชใƒ•ใ‚ฃใ‚น Code for Osaka ใจใ‹ใ‚ˆใ่กŒใใ€‚\r\nๅƒ่‘‰ๅธ‚้•ทใซใ‚‚็™บ่กจใ—ใŸ่ณ‡ๆ–™ใ€‚\r\n\r\n[myThings](https://mythings.yahoo.co.jp/) ใฎ็น‹ใŒใ‚‹ๅ…ˆใฎไธ€ใคใจใ—ใฆใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚’็›ฎ่ซ–ใ‚“ใงใƒใƒฃใƒฌใƒณใ‚ธใ—ใŸใ€‚\r\n\r\n่จญๅฎšใ•ใ‚ŒใŸใƒŸใƒƒใ‚ทใƒงใƒณ๏ผšใ€Œ้˜ฒ็ฝใซๅฏพใ™ใ‚‹ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฎๅˆฉๆดป็”จใ€\r\n\r\nใƒ‡ใƒผใ‚ฟใ‚ซใ‚ฟใƒญใ‚ฐใ‚‚ใ‚ใ‚‹ใ‘ใ‚Œใฉใ‚‚ใ€ใฉใฎใใ‚‰ใ„ๆดป็”จใ•ใ‚Œใฆใ„ใ‚‹ใฎใ‹ใ‚ˆใใ‚ใ‹ใ‚‰ใชใ„ใฎใŒๆ‚ฉใฟใ€‚\r\nใƒ•ใƒญใƒผใŒใฉใ‚“ใฉใ‚“็ฉใฟ้‡ใชใฃใฆๆฅญๅ‹™้‡ใ 
ใ‘ใŒ่†จใ‚‰ใ‚€ใ€‚\r\n\r\nๆคœ็ดขใƒญใ‚ฐใ‹ใ‚‰ๆดป็”จ็Šถๆ…‹ใŒๅˆ†ใ‹ใ‚‰ใชใ„ใ‹ใ‚’่ชฟในใฆใฟใŸใ€‚ๅƒ่‘‰ๅธ‚ใฏใƒใ‚ถใƒผใƒ‰ใƒžใƒƒใƒ—ใฎ้ †ไฝใŒ้ก•่‘—ใซไฝŽใ‹ใฃใŸใ€‚็”จ่ชžใฎๅ•้กŒใ‚‚ใ‚ใ‚Šใใ†ใ€‚\r\nไฟ่‚ฒๆ‰€ใƒžใƒƒใƒ—ใฏ 11 ๆœˆใ”ใ‚ใซใ‚นใƒ‘ใ‚คใ‚ฏใŒใ‚ใ‚‹ใ€‚\r\n\r\nCode for Chiba ใจใฎ้ก”ๅˆใ‚ใ›ใ€‚ใ‚ตใƒผใƒ“ใ‚นใฎ็ถ™็ถšๆ€งใŒๅ•้กŒใ€‚\r\n\r\nๆฅญๅ‹™ๆ”น้ฉๆŽจ้€ฒ่ชฒใงๅ‹‰ๅผทไผš๏ผšไพ‹ใˆใฐ็ฃๅฎณใฎใŸใ‚ใฎ IoT ใƒ‡ใƒใ‚คใ‚นใจใ‹ใ€‚ๅทใฎไธŠๆตใงๆฐดใฎๆตใ‚ŒใŒ่‰ฏใใชใฃใฆใ„ใ‚‹ใ‹ใฉใ†ใ‹ใ‚’ๆคœ็Ÿฅใ™ใ‚‹ๆฐด่ปŠใฎใƒ‡ใƒใ‚คใ‚นใจใ‹ใ€‚\r\n\r\n็พๆ™‚็‚นใงๅ…ฌ้–‹ใ•ใ‚Œใฆใ„ใ‚‹ใƒ‡ใƒผใ‚ฟใงๅœฐๅ›ณไฝœๆˆ๏ผš่กŒๆ”ฟใƒ‡ใƒผใ‚ฟใจ[ๅƒ่‘‰ใƒฌใƒ](https://chibarepo.secure.force.com/)ใฎใƒžใƒƒใ‚ทใƒฅใ‚ขใƒƒใƒ—ใงไฝœใฃใฆใฟใŸใ€‚\r\n\r\nๅˆฅใฎๅด้ขใจใ—ใฆใฏ้–‹็คบ่ซ‹ๆฑ‚ใ‹ใ‚‰ใ‚ขใƒ—ใƒญใƒผใƒใ€‚้ขจๆฐดๅฎณๅฑฅๆญดใƒ‡ใƒผใ‚ฟใฏใ€ไธๅ‹•็”ฃๆฅญ็•ŒใฎไบบใŒใ‚ˆใๅ‚็…งใ™ใ‚‹ใ€‚ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟๅŒ–ใฎใ‚ฟใƒผใ‚ฒใƒƒใƒˆใจใ—ใฆ่‰ฏใ„ใ ใ‚ใ†ใ€‚ๅƒ่‘‰ๅธ‚ใฏๅ† ๆฐดใฎๆƒ…ๅ ฑใŒๅคšใ„ใ€‚\r\n\r\nๅธ‚ๆฐ‘ใฏ่‡ชๅˆ†ใŒๆฌฒใ—ใ„ใ‚‚ใฎใ‚’ไฝœใ‚‹ใ€‚่กŒๆ”ฟใฎ่ฒ ๆ‹…ใฏ็‰นใซ่ปฝใใฏใชใ‚‰ใชใ„ใ€‚ใ‚คใƒณใ‚ปใƒณใƒ†ใ‚ฃใƒ–ใฎ่จญๅฎšใŒ้›ฃใ—ใ„ใ€‚\r\n\r\nใƒ‡ใƒผใ‚ฟๆดป็”จ้‡ใ‚’ๆธฌใ‚‹ใซใ—ใฆใ‚‚ใ€็พๆ™‚็‚นใงใฎๆธฌๅฎšๅ€คใฏๅฐใ•ใ™ใŽใฆใ€ใ“ใฎๅ€คใงๅˆคๆ–ญใ‚’ใ™ใ‚‹ใจๅคฑๆ•—ใ—ใใ†ใ€‚\r\n\r\n# ๅˆ้–‹ๅ‚ฌ๏ผ็ฅžๆˆธ็™บใ‚ฏใƒญใ‚นใƒกใƒ‡ใ‚ฃใ‚ขใ‚คใƒ™ใƒณใƒˆใ€Œ078ใ€ใซใคใ„ใฆ\r\n\r\n## ๆฆ‚่ฆ\r\n\r\n้•ทไบ•ใ•ใ‚“\r\n\r\nๅŸบๆœฌ็š„ใซใฏๆฐ‘ใƒ™ใƒผใ‚นใงใ‚„ใฃใฆใ„ใ‚‹ใ€‚ๅฎŸ่กŒๅง”ๅ“กไผšๅฝขๅผใ€‚\r\n\r\n็ฅžๆˆธๅธ‚ 2020 ใฎใƒ“ใ‚ธใƒงใƒณ๏ผšใ€Œ่‹ฅ่€…ใซ้ธใฐใ‚Œใ‚‹ใพใกใ€\r\n\r\n5/6,7,8 ใงใ€Œ078ใ€ใ‚’้–‹ๅ‚ฌ\r\n\r\n5/7 [Comin' Kobe](http://comingkobe.com/) : Rock fes ใจ ใ€Œ078ใ€ใจ้€ฃๆบใ™ใ‚‹ใ“ใจใ‚‚ๆฑบใพใฃใŸ\r\n\r\n## ๅฎŸ้จ“้ƒฝๅธ‚็ฅžๆˆธใฎๅฎŸ็พใซๅ‘ใ‘ใฆ\r\n\r\n่—คไบ•ใ•ใ‚“\r\n\r\nไปŠใพใงใฎๅฏ„ใ›้›†ใ‚ใ‚’ใ‚„ใ‚‹ใจใ„ใ†ใ‚ˆใ‚Šใฏใ€็ฅžๆˆธใ‚‰ใ—ใ„ใ‚ณใƒณใƒ†ใƒณใƒ„ใ‚’็”Ÿๆˆใ™ใ‚‹ๆ–นๅ‘ใซใ—ใŸใ„ใ€‚ๆง˜ใ€…ใชๅˆ†้‡Žใงๆดป่บใ•ใ‚Œใฆใ„ใ‚‹ไบบใจใ€ใใ“ใซๅ‚ๅŠ 
ใ™ใ‚‹ๅธ‚ๆฐ‘ใƒป่กŒๆ”ฟใ€ใจใ„ใ†ๅพช็’ฐใ‚’็นฐใ‚Š่ฟ”ใ™๏ผˆ็คพไผšๅฎŸ้จ“ใƒป็คพไผšๅฎŸ่ฃ…ใ™ใ‚‹๏ผ‰ใ“ใจใงใ€ๆ–ฐใ—ใ„ไพกๅ€ค็™บ่ฆ‹ใซ็ตใณไป˜ใ‘ใŸใ„ใ€‚\r\n\r\nๅญฆ็”ŸใŒ้›†ใพใ‚‹ใ€‚ๆ•ใพใˆใŸใ„ใ€‚\r\n\r\n๏ผ“ๆ‹ ็‚นใงๅฑ•้–‹ใ™ใ‚‹ไบˆๅฎšใ€‚\r\n\r\n- KIITO\r\n- ใฟใชใจใฎใ‚‚ใ‚Šๅ…ฌๅœ’ \r\n- ๆฑ้Šๅœ’ๅœฐ\r\n\r\nไพ‹ใˆใฐ KIITO ใงใฏ\r\n\r\n- ใ‚ซใƒณใƒ•ใ‚กใƒฌใƒณใ‚น\r\n- ใƒˆใƒฌใƒผใƒ‰ใ‚ทใƒงใƒผ\r\n- ๆ˜ ็”ป้–ข้€ฃๅฑ•็คบ\r\n- ใƒ‘ใƒผใƒ†ใ‚ฃใƒผ๏ผˆ้Ÿณๆฅฝใƒป้ฃŸใฎไบคๆตใ‚คใƒ™ใƒณใƒˆ๏ผ‰TEDxใจใ‹\r\n- ใกใณใฃใ“ใ†ใน / ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\n\r\nใฟใชใจใฎๆฃฎๅ…ฌๅœ’\r\n- ROCK x TECHNO\r\n\r\nๆฑ้Šๅœ’ๅœฐ๏ผšใ•ใพใ–ใพใชใ‚คใƒ™ใƒณใƒˆ\r\n\r\n็ฅžๆˆธใฎไปฃๅ่ฉžใซใชใ‚‹ใ‚ˆใ†ใชใ‚คใƒ™ใƒณใƒˆใซใ—ใŸใ„ใ€Œ็ฅžๆˆธใจใ„ใˆใฐ078ใ€ใ‚’็›ฎๆŒ‡ใ™ใ€‚\r\n\r\nๆœญๅนŒ [NoMaps](https://no-maps.jp/) ็ฆๅฒก [ๆ˜Žๆ˜Ÿๅ’Œๆฅฝ](http://2016.myojowaraku.net/) ใชใฉใจใ‚‚้€ฃๆบใ‚’็›ฎๆŒ‡ใ™ใ€‚\r\n\r\nQA\r\n\r\n- Q:ใ€Œ็„กๆ–™ใ‚คใƒ™ใƒณใƒˆใ€ใฏใฉใฎ็จ‹ๅบฆ้‡่ฆ–ใ™ใ‚‹ใฎใ‹\r\n- A: SXSW ใฏๆœ‰ๆ–™ใƒใ‚ฑใƒƒใƒˆใ‚‚ใ‚ใ‚‹ใ—ใ€ใใ‚Œใ ใ‘ใฎไพกๅ€คใ‚’ๅ‡บใ›ใ‚‹ใ‚คใƒ™ใƒณใƒˆใซใ—ใŸใ„ใจใ„ใ†ๆ€ใ„ใ‚‚ใ‚ใ‚‹ใŒใ€ใพใšใฏไธ€ๅนด็›ฎใฏ็„กๆ–™ใ‚คใƒ™ใƒณใƒˆใงใ€‚\r\n- Q: ๅฎฟๆณŠใฎๅ•้กŒใฏ๏ผŸ\r\n- A: ๆ‚ฉใฟไธญ\r\n- Q:ใ€Œ่‹ฅ่€…ๅ‘ใ‘ใ€ใƒใ‚คใƒณใƒˆใŒใ‚ˆใใ‚ใ‹ใ‚‰ใชใ„\r\n- A: ่‹ฅ่€…ใ˜ใ‚ƒใชใใฆใ‚‚ใ‚ˆใ„ใŒใ€ใจใซใ‹ใใ‚„ใ‚‹ใ€‚ใ‹ใค่‰ฒใฎใคใ„ใฆใ„ใชใ„ใ‚ญใƒผใƒฏใƒผใƒ‰ใงใ€‚\r\n\r\n" }, { "alpha_fraction": 0.6823220252990723, "alphanum_fraction": 0.7555425763130188, "avg_line_length": 20.85333251953125, "blob_id": "96fea56865a26de14cb3890a00400a88a22f684a", "content_id": "a6506d92b85d4fdc9659dd6d5ab8dbf4013d63a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6416, "license_type": "no_license", "max_line_length": 162, "num_lines": 150, "path": "/_posts/2016-10-20-meeting21.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš21st\r\ndate: 2016-10-20 
19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n โ‘ ไธญๅญฆใƒป้ซ˜ๆ กใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ็š„ๆ€่€ƒๆ•™ๆไฝœๆˆ๏ผˆๅ‰็”ฐ๏ผ‰็ด„20ๅˆ†\r\n โ‘กCoderDojo็ฅžๆˆธ็ดนไป‹๏ผ†ใƒกใƒณใ‚ฟใƒผๅ‹Ÿ้›†๏ผˆๆจชๅฑฑ๏ผ‰็ด„10ๅˆ†\r\n โ‘ขไธ‰็”ฐใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ‘Š็Ÿฅ๏ผˆ้ซ˜ๆฉ‹๏ผ‰็ด„20ๅˆ†\r\n โ‘ฃใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒช้€ฒๆ—ๅ ฑๅ‘Š๏ผˆๅพŒ่—ค๏ผ‰็ด„10ๅˆ†\r\n โ‘คใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใฎๆ–ฐใŸใช้€ฒๅŒ–ๅฝข-ๆ–ฐใ—ใ„ๆœชๆฅใฎๅ‰ตใ‚Šๆ–น-\r\n ใ€€๏ผˆ500 Startups, US-Japan Liaison Officer ๅฑฑไธ‹๏ผ‰็ด„10ๅˆ†\r\n โ‘ฅRESAS APIๅ…ฌ้–‹๏ผˆๅบŠไธฆfromใƒใƒผใƒ ใƒฉใƒœ๏ผ‰็ด„60ๅˆ†\r\n โ‘ฆCode for Japan Summit 2016ๅ‚ๅŠ ่€…ๅ‹Ÿ้›†๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n โ‘งWorld Data Viz Challenge 2016 2nd Stageๅ ฑๅ‘Š๏ผˆ่ฅฟ่ฐท๏ผ‰\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1856355607919283/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-21st-meeting--AdpWFfqWL9ubh7miw8fBrzHlAQ-UewtDeWCdf4bC3YruQIY6)\r\n/ Links -\r\n\r\nagenda\r\n\r\n+ โ‘ ไธญๅญฆใƒป้ซ˜ๆ กใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ็š„ๆ€่€ƒๆ•™ๆไฝœๆˆ๏ผˆๅ‰็”ฐ๏ผ‰็ด„20ๅˆ†\r\n+ โ‘กCoderDojo็ฅžๆˆธ็ดนไป‹๏ผ†ใƒกใƒณใ‚ฟใƒผๅ‹Ÿ้›†๏ผˆๆจชๅฑฑ๏ผ‰็ด„10ๅˆ†\r\n+ โ‘ขไธ‰็”ฐใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ‘Š็Ÿฅ๏ผˆ้ซ˜ๆฉ‹๏ผ‰็ด„20ๅˆ†\r\n+ โ‘ฃใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒช้€ฒๆ—ๅ ฑๅ‘Š๏ผˆๅพŒ่—ค๏ผ‰็ด„10ๅˆ†\r\n+ โ‘คใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใฎๆ–ฐใŸใช้€ฒๅŒ–ๅฝข-ๆ–ฐใ—ใ„ๆœชๆฅใฎๅ‰ตใ‚Šๆ–น-๏ผˆ500 Startups, US-Japan Liaison Officer ๅฑฑไธ‹๏ผ‰็ด„10ๅˆ†\r\n+ โ‘ฅRESAS APIๅ…ฌ้–‹๏ผˆๅบŠไธฆfromใƒใƒผใƒ ใƒฉใƒœ๏ผ‰็ด„60ๅˆ†\r\n+ โ‘ฆCode for Japan Summit 2016ๅ‚ๅŠ ่€…ๅ‹Ÿ้›†๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n+ โ‘งWorld Data Viz Challenge 2016 2nd Stageๅ ฑๅ‘Š๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n\r\n# ไธญๅญฆใƒป้ซ˜ๆ กใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ็š„ๆ€่€ƒๆ•™ๆไฝœๆˆ\r\n\r\nๅ‰็”ฐๅ…ˆ็”Ÿ\r\n\r\nใƒ–ใƒญใƒƒใ‚ฏใƒใ‚งใƒผใƒณใงๅญฆใณใฎ็ณป่ญœใƒปๅฑฅๆญดใŒ๏ผŸ\r\n\r\nใ€Œใƒซใƒผใƒ–ใƒชใƒƒใ‚ฏใ€\r\n\r\n[Knowledge Connector](http://www.meti.go.jp/press/2014/11/20141107002/20141107002.html)\r\n\r\nๆ•™็ง‘ใƒปๅญฆๆ กๆฎต้šŽ้–“ใฎใคใชใŒใ‚Šใ‚’่€ƒๆ…ฎใ—ใŸใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ\r\n\r\n- 
ๆ•™่‚ฒใ‚’ๅญฆๆ กใพใ‹ใ›ใซใ—ใˆใฆใŠใ“ใ†ๆ™‚ไปฃใงใฏใชใ„ ๅธ‚ๆฐ‘ๅ‚ๅŠ ๅž‹\r\n- [ใ‚นใƒฉใ‚คใƒ‰่ณ‡ๆ–™](https://drive.google.com/file/d/0B3x_amEfMljRREpLeWY0NjByRFk/view?usp=drive_web)\r\n- ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใ‚’ๅญฆใถใŸใ‚ใฎ่‰ฏ่ณชใชๆ•™ๆใ‚’ไฝœใ‚ŠใŸใ„\r\n\r\nใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐๆ•™่‚ฒใฎๅธ‚ๆฐ‘ๅ‚็”ป\r\n\r\nๅƒ่‘‰็œŒๆŸๅธ‚ CoderDojo ใงใฏๆ•™่‚ฒๅง”ๅ“กไผšใง CoderDojo ใจ้€ฃๆบใ™ใ‚‹ใ“ใจใŒๆฑบใพใฃใฆใ„ใ‚‹ใ€‚\r\n<script async class=\"speakerdeck-embed\" data-id=\"9602362cc819468bac349a547dd89424\" data-ratio=\"1.33333333333333\" src=\"//speakerdeck.com/assets/embed.js\"></script>\r\n\r\n+ Q:ใชใœใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐๆ•™่‚ฒใŒๅคงไบ‹ใ‹๏ผŸไฝ•ใ‚’้›ใˆใ‚‹ใฎใ‹๏ผŸ\r\n+ ๅ‰็”ฐ๏ผšๆŒ่ซ–ใงใ™ใŒใ€ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใงๅŽŸ็†ใ‚’็Ÿฅใ‚‹ใ“ใจใจใ€ใ‚ณใƒณใƒ”ใƒฅใƒผใ‚ฟใซ่งฆใ‚Œใ‚‹ใ“ใจใ€‚ๅฐๅญฆ็”Ÿใงใฏ็”ฐๆคใˆ็ตŒ้จ“ใ‚‚ใ™ใ‚‹ใฎใจๅŒๆง˜\r\n+ Q:ๅ…ฅๅฃใฏใจใ‚‚ใ‹ใใ€ไฝ•ใ‚’้›ใˆใฆใ„ใ‚‹ใฎใ‹๏ผŸใŒๆ›–ๆ˜งใงใฏใชใ„ใ‹\r\n\r\n# CoderDojo็ฅžๆˆธ็ดนไป‹๏ผ†ใƒกใƒณใ‚ฟใƒผๅ‹Ÿ้›†\r\nCoderDojo ็ฅžๆˆธ ไปฃ่กจ ๆจชๅฑฑใ•ใ‚“\r\n\r\n- 2016/09/14 ่จญ็ซ‹\r\n- ใƒกใƒณใ‚ฟใƒผๅ‹Ÿ้›†ไธญ๏ผ๏ผˆ็พๅœจ๏ผ™ๅ๏ผ‰\r\n- ็ฅžๆˆธๅธ‚้’ๅฐ‘ๅนดไผš้คจใง้–‹ๅ‚ฌไบˆๅฎšใ€‚ ๅ ดๆ‰€๏ผšhttp://kobe-youthhall.jp/?cat=8\r\n- CoderDojoใ‚’ๅˆใ‚ใฆ่žใไบบ๏ผŸ->็ตๆง‹ๅคšใ„\r\n\r\n็‰นๅพด\r\n: ๅญไพ›ใŸใกใŒไฝœใ‚ŠใŸใ„ใ‚‚ใฎใ‚’ใ‚ตใƒใƒผใƒˆใ™ใ‚‹\r\n: ใ‚ซใƒชใ‚ญใƒฅใƒฉใƒ ใƒ‰ใƒชใƒ–ใƒณใงใฏใชใ„\r\n\r\n- 2011ๅนดใ€ใ‚ขใ‚คใƒซใƒฉใƒณใƒ‰็™บ็ฅฅ\r\n- 66ใ‚ซๅ›ฝ\r\n- 22้ƒฝ้“ๅบœ็œŒ60้“ๅ ด\r\n\r\n็ฅžๆˆธใฏ่ฐทไธŠใจใ‹\r\n\r\nใƒกใƒณใ‚ฟใƒผใซๅ‘ใไบบ\r\n: ๅญไพ›ใŒๅฅฝใ\r\n: ๆ•™ใˆใ‚‹ใฎใŒๅฅฝใ\r\n: ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใŒๅฅฝใ\r\n\r\n11ๆœˆ13ๆ—ฅ(ๆ—ฅ) 14:00-16:00 ้“ๅ ด้–‹ใใพใ™\r\n: ่ฆ‹ๅญฆๆž 2ๅธญ\r\n: ใ‚ขใ‚ทใ‚นใ‚ฟใƒณใƒˆๅˆถๅบฆ๏ผšใƒกใƒณใ‚ฟใƒผใฎ็›ฎใซใชใ‚‹ใ€‚ใคใ„ใฆใ„ใ‘ใฆใชใ„ๅญไพ›ใ€ๅ›ฐใฃใฆใ„ใ‚‹ๅญไพ›ใ‚’ใฟใคใ‘ใฆใƒกใƒณใ‚ฟใƒผใซไผใˆใ‚‹ใ€‚ๆ“ไฝœๆ–นๆณ•ใฎ่ฃœๅŠฉใ€‚\r\n\r\n11ๆœˆ26ๆ—ฅ(ๅœŸ) KIITO\r\n็‰น่จญ้“ๅ ดใ‚„ใ‚Šใพใ™\r\n\r\nใ‚ตใ‚คใƒˆใฏใ“ใกใ‚‰ <http://coderdojokobe.wixsite.com/home>\r\n\r\n# 
ไธ‰็”ฐใƒใƒฃใƒฌใƒณใ‚ธใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚นๅ‘Š็Ÿฅ\r\nไธ‰็”ฐๅธ‚ ้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\n[COG2016](http://park.itc.u-tokyo.ac.jp/padit/cog2016/)\r\n\r\nไธ‰็”ฐๅธ‚ COG2016 ใฎใŠ้กŒใฏ๏ผš\r\n: ้˜ฒ็ฝ\r\n: ใ‚ทใƒ†ใ‚ฃใ‚ปใƒผใƒซใ‚น\r\n\r\nใฉใ‚“ใชใ‚ขใƒ—ใƒชใŒ้ŽๅŽปใ‚ใฃใŸใ‹ใจใ„ใ†ใจใ€ไพ‹ใˆใฐ\r\n[GENSAI QUEST](http://www.slideshare.net/yoit/gensaiquest)\r\n\r\n- Q.ๆณ•ไบบใงใฎๅฟœๅ‹Ÿใฏๅฏ่ƒฝ๏ผŸ(ๅคงๆดฅใ•ใ‚“)\r\n- A.ๅ•ใ„ๅˆใ‚ใ›ใพใ™(็ฅžๆˆธๅคงๅญฆ๏ผŸใซ)\r\n\r\n- ไธ‰็”ฐๅธ‚ใฎใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใฏใใ‚ŒใปใฉPDFใฐใ‹ใ‚Šใงใฏใชใ„ใ‚ˆ(kawaiใ•ใ‚“)\r\n\r\n- ใพใšใฏOSMใซใ„ใ‚ŒใŸใ‚‰๏ผŸ(ๆฆŽใ•ใ‚“)\r\n- ็ทฏๅบฆ็ตŒๅบฆๆƒ…ๅ ฑใŒ็„กใ„ใƒ‡ใƒผใ‚ฟใŒๅคšใ„ใ€‚\r\n\r\n# ใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒช้€ฒๆ—ๅ ฑๅ‘Š\r\nๅพŒ่—ค ใ•ใ‚“\r\n\r\nๅธ‚็”บๆ‘ใฎ HP ใซใ‚ซใƒฌใƒณใƒ€ใƒผใŒ่ผ‰ใฃใฆใ„ใ‚‹ใฎใฏใ€ใ‚ขใƒซใ‚ขใƒซไบ‹ๆกˆใ€‚ใ“ใ‚Œใ‚’ใ‚ขใƒ—ใƒชใซใ™ใ‚‹ใ€‚iCalendar ใซใ™ใ‚‹ใ€‚\r\n้››ๅฝขใ‚ขใƒ—ใƒชใ‚’ๅ…ฌ้–‹ไบˆๅฎš\r\n\r\n\r\n# ใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใฎๆ–ฐใŸใช้€ฒๅŒ–ๅฝข-ๆ–ฐใ—ใ„ๆœชๆฅใฎๅ‰ตใ‚Šๆ–น-\r\n500 Startups, US-Japan Liaison Officer ๅฑฑไธ‹ใ•ใ‚“\r\n\r\nใ‚คใƒŽใƒ™ใƒผใ‚ทใƒงใƒณใฎใƒ‘ใ‚ฟใƒผใƒณ \r\n็”ปๆœŸ็š„ใช้Šƒใ‚’็™บๆ˜Ž \r\n็”ปๆœŸ็š„ใช้Šƒใฎไฝฟใ„ๆ–นใ‚’็ทจใฟๅ‡บใ™ \r\n้Šƒใฎใƒใƒชใ‚จใƒผใ‚ทใƒงใƒณใ‚’ไฝœใ‚‹ \r\n\r\nUSA ใฏ M&A ใŒๅคงๅŠใ€‚ๆ—ฅๆœฌใฏ 8 ๅ‰ฒไปฅไธŠใŒ IPO ใ‚’็›ฎๆŒ‡ใ—ใฆใ„ใ‚‹ใ€‚ใ“ใ‚Œใฏๅ•้กŒใซใชใ‚‹ใ ใ‚ใ†ใ€‚\r\nใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—ใจๅคงไผๆฅญใฎ้€ฃๆบใŒ้€ฒใ‚“ใงใ„ใ‚‹ใ€‚\r\nใจใฏใ„ใˆใ†ใพใใ„ใ‹ใชใ„ไบ‹ใ‚‚ๅคšใ„ใ€‚\r\n\r\n\r\n# RESAS APIๅ…ฌ้–‹\r\nใƒใƒผใƒ ใƒฉใƒœ ๅบŠไธฆใ•ใ‚“\r\n\r\n็ด„70ใฎ็ตฑ่จˆใƒ‡ใƒผใ‚ฟใ‚’ใƒ“ใ‚ธใƒฅใ‚ขใƒฉใ‚คใ‚บใ™ใ‚‹ใƒ„ใƒผใƒซ\r\n\r\nRESASใฏใ‚ใใพใงใ‚‚\"่พžๆ›ธ\"\r\n\r\n- RESAS ใฏ็œๅบใŒๅ‡บใ—ใฆใ„ใ‚‹ใƒ‡ใƒผใ‚ฟใ ใ‘ใงใฏใชใ„ใ€‚NAVITIME ใฎใ€Œใ‚คใƒณใƒใ‚ฆใƒณใƒ‰GPSใƒ‡ใƒผใ‚ฟใ€ใ‚‚ๅ…ฅใฃใฆใ„ใ‚‹ใ€‚ๅธๅ›ฝใƒ‡ใƒผใ‚ฟใƒใƒณใ‚ฏใŒไธญๅฟƒใซใชใฃใฆใพใจใ‚ใŸใ€‚\r\n<http://corporate.navitime.co.jp/topics/pr/201509/11_3262.html>\r\n- 2016 / 11 ๅˆๆ—ฌใซ API ใŒใƒชใƒชใƒผใ‚นใ•ใ‚Œใ€ใใ‚ŒใจๅŒๆ™‚ใซ API 
ใ‚ณใƒณใƒ†ใ‚นใƒˆใฎๅ‹Ÿ้›†ใŒใ‹ใ‹ใ‚‹ใ€‚\r\n\r\nใ„ใพใฎใจใ“ใ‚SEOๅผฑใ„\r\n\r\n- Q: <https://datausa.io/> ใงใฏใƒ‡ใƒผใ‚ฟใ‚’ใ‚นใƒˆใƒผใƒชใƒผใซใ—ใฆ่กจ็คบใ—ใฆใ„ใ‚‹ใ€‚ใ‚ทใƒŠใƒชใ‚ชใ€่ชญใฟใ‚‚ใฎ็š„ใชใพใจใพใ‚ŠใŒใปใ—ใ„ใจๆ€ใ†ใ€‚\r\n- A: RESASใฏๅ›ฝใŒๆไพ›ใ—ใฆใ„ใ‚‹ใ‚‚ใฎใชใฎใงใ‚นใƒˆใƒผใƒชใƒผใจใ„ใฃใŸใ€Œๅˆคๆ–ญใ€ใ‚’ไบคใˆใ‚‹ใฎใฏ้ฟใ‘ใŸใ„ใจ่€ƒใˆใฆใ„ใ‚‹ใ‚ˆใ†ใ ใ€‚็ฅžๆˆธๅธ‚ใชใฉๅœฐๅŸŸ่กŒๆ”ฟใŒใ€ใƒ‡ใƒผใ‚ฟใ‚’ใ‚นใƒˆใƒผใƒชใƒผใซใ—ใฆๆไพ›ใ—ใฆใ„ใใฎใ‚‚ใ„ใ„ใ‹ใ‚‚ใ—ใ‚Œใชใ„ใ€‚\r\n- Q:ใ‚ชใƒผใƒ—ใƒณใ‚ฝใƒผใ‚นๅŒ–ใ‚’ๆคœ่จŽใ•ใ‚Œใฆใ„ใ‚‹ใ‹๏ผŸ\r\n- A:ๆœฌไฝ“ใ‚„APIใ‚ตใƒณใƒ—ใƒซใ‚ณใƒผใƒ‰ใ‚’Githubใซ็ฝฎใ„ใฆๅ…ฌ้–‹ใ™ใ‚‹ใ“ใจใฏๆคœ่จŽใ—ใฆใ„ใ‚‹\r\n" }, { "alpha_fraction": 0.60849529504776, "alphanum_fraction": 0.7261070013046265, "avg_line_length": 52.25, "blob_id": "cbaccf530467167ecdb6ad100ff66bc831813e9c", "content_id": "ace92fded724505428ec85fddb3fa2c3cb032364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6159, "license_type": "no_license", "max_line_length": 158, "num_lines": 92, "path": "/index.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: default\r\n---\r\n\r\nCode for Kobe ใฏใ€ใ€ŒITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ใงใ™ใ€‚\r\n\r\n[Code for Japan ใฎ Brigade](http://www.code4japan.org/brigade) ใซ็™ป้Œฒใ•ใ‚Œใฆใ„ใฆใ€็ฅžๆˆธใ‚’ไธญๅฟƒใซๆดปๅ‹•ใ—ใฆใ„ใ‚‹ไปปๆ„ๅ›ฃไฝ“ใงใ™ใ€‚\r\n\r\n* [Code for Kobe](https://www.facebook.com/codeforkobe) : ใ‚คใƒ™ใƒณใƒˆๅ‘Š็ŸฅใชใฉใŒๆŽฒ่ผ‰ใ•ใ‚Œใ‚‹ Facebook page ใงใ™ใ€‚ๅ‚ๅŠ ใฏใ“ใกใ‚‰ใ‹ใ‚‰ใฉใ†ใžใ€‚\r\n* [Code for Kobe Connect](https://www.facebook.com/groups/1536379276600668/)\r\nไธŠ่จ˜ใฎ Facebook page ใจใฏๅˆฅใซใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒกใƒณใƒๅŒๅฃซใฎๆƒ…ๅ ฑไบคๆ›็”จใฎใ‚ฐใƒซใƒผใƒ—ใŒใ‚ใ‚Šใพใ™ใ€‚\r\nใ“ใกใ‚‰ใฏๅฎšไพ‹ไผšใซๅ‚ๅŠ ใ—ใŸไบบใŒๅ‚ๅŠ ใงใใพใ™ใ€‚\r\n* [Code of Conduct](CODE_OF_CONDUCT.html) : 
ไธ€่ˆฌ็š„ใซใ€Œ่กŒๅ‹•่ฆ็ฏ„ใ€ใจๅ‘ผใฐใ‚Œใ‚‹ๆ–‡ๆ›ธใงใ™ใŒใ€\r\nใใ‚“ใชใซๅ …่‹ฆใ—ใ„ใ‚‚ใฎใงใฏใชใใ€ๅฝ“ใŸใ‚Šๅ‰ใฎใ“ใจใŒๅฝ“ใŸใ‚Šๅ‰ใงใ‚ใ‚‹ใ‚ˆใ†ใซใ€ใจใฎๆ€ใ„ใงๆ›ธใ„ใฆใ‚ใ‚Šใพใ™ใ€‚\r\n* [twitter](https://twitter.com/codeforkobe) : ๏ผˆๆฐ—ใพใใ‚Œ้‹็”จไธญ๏ผ‰\r\n* [hackmd](https://hackmd.io/s/S1sujZKzG) : ๅฎšไพ‹ไผšใชใฉใฏใ€ๅ‚ๅŠ ๅž‹ใงใƒกใƒขใ‚’ๅ–ใฃใฆใ„ใพใ™ใ€‚\r\n* [Slack](https://codeforkobe.slack.com) : ใƒใƒฃใƒƒใƒˆ\r\n* [wiki](https://github.com/codeforkobe/codeforkobe.github.io/wiki)\r\n* Code for Kobe [ใƒญใ‚ด่ฆๅฎšๆ›ธ](http://codeforkobe.github.io/logo/cfk_logo_spec.pdf) / [Illustrator file](http://codeforkobe.github.io/logo/cfk_logo_fix.ai)\r\n\r\n\r\n# ๅฎšไพ‹ไผšใƒกใƒข\r\n\r\nๆœˆๆฌกใงๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใฆใ„ใพใ™ใ€‚ใ€Œใ“ใ‚“ใชใ“ใจใ‚„ใฃใฆใ‚‹ใ‚ˆใ€ใ€Œใ“ใ‚“ใชใฎใฉใ†ใ‹ใช๏ผŸใ€ใจใ„ใ†่ฉฑใ‚’ใ—ใฆใ„ใพใ™ใ€‚\r\nๆœ€่ฟ‘ใฏ[hackmd](https://hackmd.io/c/S1sujZKzG)ใงใ€ใฟใ‚“ใชใงใƒกใƒขใ‚’ๅ–ใ‚‹ใ‚นใ‚ฟใ‚คใƒซใงใ‚„ใฃใฆใ„ใพใ™ใ€‚\r\n\r\n<ul>\r\n{% for post in site.data.logs %}\r\n<li><a class=\"post-link\" href=\"{{ post.url }}\">{{ post.title }}</a></li>\r\n{% endfor %}\r\n</ul>\r\n\r\n# ใ‚ขใƒผใ‚ซใ‚คใƒ–\r\n\r\n<ul>\r\n{% for post in site.posts %}\r\n<li>\r\n <a class=\"post-link\" href=\"{{ post.url }}\">{{ post.title }}</a> ---\r\n <span class=\"post-meta\">{{ post.date | date: \"%Y-%m-%d\" }}</span>\r\n</li>\r\n{% endfor %}\r\n</ul>\r\n\r\n* [Google for nonprofits ๅพŒใƒ—ใƒไบคๆตไผš](https://www.facebook.com/events/1558468817788579/) 2016-07-23\r\n* [18ๅ›žๅฎšไพ‹](https://paper.dropbox.com/doc/Code-for-Kobe-18th-meeting--ADW2FWrO2Q~OxtRfCSOuXrzGAQ-K9yESFpFkJUkK3IATnD2o) 2016-07-21\r\n* 17ๅ›žๅฎšไพ‹ 2016-06-16\r\n* [16ๅ›žๅฎšไพ‹](https://paper.dropbox.com/doc/Code-for-Kobe-16th-Meeting-fZGe1UhcNNLsqKVFhpGQ1) 2016-05-19\r\n* [15ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/15ๅ›žๅฎšไพ‹) 2016-04-21\r\n* [14ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/14ๅ›žๅฎšไพ‹) 
2016-03-17\r\n* [13ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/13ๅ›žๅฎšไพ‹) 2016-02-25\r\n* [12ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/12ๅ›žๅฎšไพ‹) 2016-01-21\r\n* [11ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/11ๅ›žๅฎšไพ‹) 2015-12-17\r\n* [10ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/10ๅ›žๅฎšไพ‹) 2015-11-19\r\n* [09ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/09ๅ›žๅฎšไพ‹) 2015-10-15\r\n* [NTT docomo APIๅ‹‰ๅผทไผš&็ฅžๆˆธๅธ‚ใ‚ขใƒ—ใƒชใ‚ณใƒณใƒ†ใ‚นใƒˆ ใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณ](https://www.facebook.com/events/743396319102136/) 2015-09-19\r\n * <http://www.city.kobe.lg.jp/information/opendata/contest.html>\r\n * <https://dev.smt.docomo.ne.jp>\r\n* [08ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/08ๅ›žๅฎšไพ‹) 2015-09-17\r\n* [็ฌฌ1ๅ›ž LOD Challenge Day KOBE 2015](http://peatix.com/event/109163) 2015-09-12\r\n * <http://www.slideshare.net/KoujiKozaki/linked-datalod>\r\n * <http://www.slideshare.net/KoujiKozaki/csvlod>\r\n* [07ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/07ๅ›žๅฎšไพ‹) 2015-08-19\r\n* [็ฅžๆˆธAEDใƒžใƒƒใƒ— ใƒชใƒชใƒผใ‚น](http://ponpoko1968.hatenablog.com/entry/2015/07/26/182108)\r\n* [๏ผˆๅฐ้ขจใฎใŸใ‚ๅฎšไพ‹ๅปถๆœŸ๏ผ‰](https://www.facebook.com/events/1467406310242049/) 2015-07-16\r\n* [IBM Bluemixๅ‹‰ๅผทไผš](https://www.facebook.com/events/1425423084450308/) 2015-07-15\r\n* ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟ่‡ชๆฒปไฝ“ใ‚ตใƒŸใƒƒใƒˆ 2015-06-24\r\n * <http://www.soumu.go.jp/soutsu/kanto/press/27/0520re-2.html>\r\n * <http://yokohamalab.jp/2015/05/20/opendatasummit/>\r\n * <http://yokohamalab.jp/2015/07/14/ojs624_slide/>\r\n* [06ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/06ๅ›žๅฎšไพ‹) 2015-06-23\r\n* [05ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/05ๅ›žๅฎšไพ‹) 2015-05-21\r\n* [04ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/04ๅ›žๅฎšไพ‹) 
2015-04-16\r\n* [ใ‚ทใƒ“ใƒƒใ‚ฏใƒ†ใƒƒใ‚ฏใƒ•ใ‚ฉใƒผใƒฉใƒ 2015](http://civictechforum2015.peatix.com/) 2015-03-29\r\n* [03ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/03ๅ›žๅฎšไพ‹) 2015-03-19\r\n* [Code For Kobeๆกˆๅ†…็”จ่จ˜ไบ‹](http://blog.yukiohyama.com/2015/03/15/feel-codeforkobe/)\r\n* [02ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/02ๅ›žๅฎšไพ‹) 2015-02-19\r\n* [International Open Data Day in ๅ…ตๅบซใƒป็ฅžๆˆธ](http://codeforkobe-3.peatix.com/) 2015-02-21\r\n* [้–ข่ฅฟใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟEXPO '15](http://expo15.theodi.jp/) 2015-02-11\r\n * <https://www.facebook.com/codeforkobe/posts/1566373083609087>\r\n* [Android Project Ara ใจใ‚‚ใฎใฅใใ‚Šใฎๆœชๆฅ](http://codeforkobe-2.peatix.com/) 2015-02-05\r\n* [ๅฒกๆœฌๅ•†ๅบ—่ก— iBeacon ใ‚ขใ‚คใƒ‡ใ‚ขใ‚ฝใƒณ](http://codeforkobe-1.peatix.com/) 2015-01-24\r\n* [01ๅ›žๅฎšไพ‹](https://github.com/codeforkobe/codeforkobe.github.io/wiki/01ๅ›žๅฎšไพ‹) 2015-01-15\r\n* ๅฏ่ฆ–ๅŒ–ๅ‹‰ๅผทไผš 2014-12-28\r\n * <https://www.facebook.com/codeforkobe/posts/1545890415657354>\r\n * <http://www.slideshare.net/atsuhikoyasuda1/code-for-kobe>\r\n * <http://www.slideshare.net/atsuhikoyasuda1/d3js-code-for-kobe>\r\n * <http://bouzuya.hatenablog.com/entry/2014/12/28/235959>\r\n* [ใ‚ญใƒƒใ‚ฏใ‚ชใƒ•](https://github.com/codeforkobe/codeforkobe.github.io/wiki/ใ‚ญใƒƒใ‚ฏใ‚ชใƒ•) 2014-12-10\r\n\r\n\r\n## ใƒชใƒณใ‚ฏ้›†\r\n* [็ฅžๆˆธๅธ‚ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟ Facebook](https://www.facebook.com/%E7%A5%9E%E6%88%B8%E5%B8%82%E3%82%AA%E3%83%BC%E3%83%97%E3%83%B3%E3%83%87%E3%83%BC%E3%82%BF-1015998728412821/)\r\n" }, { "alpha_fraction": 0.6245614290237427, "alphanum_fraction": 0.7214034795761108, "avg_line_length": 20.58730125427246, "blob_id": "e2576a39220b4eb7eeefac8bdb465360dfc2774e", "content_id": "3ce8caa20cad5c4daf5294b535e5d4edf26bad4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2221, "license_type": "no_license", "max_line_length": 122, "num_lines": 63, "path": 
"/_posts/2017-04-20-meeting27.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš27th\r\ndate: 2017-04-20 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n Code for Japan Summit 2017ๆบ–ๅ‚™ๅง”ๅ“กไผš\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/2021733168053886/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-27th-meeting--AdqSSor7yfp15QZc~cmRV4F2AQ-JnohqxUA2qaNwf73DRGS2)\r\n/ Links: -\r\n\r\nๅ ดๆ‰€:[ใ‚นใƒšใƒผใ‚นใ‚ขใƒซใƒ•ใ‚กไธ‰ๅฎฎ](http://www.spacealpha.jp/sannomiya/access.html)\r\n\r\nใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n+ ใ€œไนพๆฏใ€œ\r\n+ Code for Japan Summit 2017ๆบ–ๅ‚™ๅง”ๅ“กไผš\r\n+ 19:00๏ฝž19:05 ๆŒจๆ‹ถ๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n+ 19:05๏ฝž19:10 ใ‚ญใƒƒใ‚ฏใ‚ชใƒ•ๆŒฏใ‚Š่ฟ”ใ‚Š๏ผˆ้™ฃๅ†…๏ผ‰\r\n+ 19:10๏ฝž19:45 ใƒ†ใƒผใƒžๆฑบใ‚\r\n+ 19:45๏ฝž20:15 ใ‚ปใƒƒใ‚ทใƒงใƒณๆคœ่จŽ\r\n+ 20:15๏ฝž20:30 ๅ„ใƒใƒผใƒ ใ‹ใ‚‰๏ผˆใƒ‡ใ‚ถใ‚คใƒณใ€ใƒกใƒ‡ใ‚ฃใ‚ขใ€ใƒ—ใƒฌใƒ—ใƒญใ‚ฐใƒฉใƒ ใ€ๅฝ“ๆ—ฅ้‹ๅ–ถ๏ผ‰\r\n+ 20:30๏ฝž20:45 ใ‚นใƒใƒณใ‚ตใƒผใซใคใ„ใฆ\r\n+ 20:45๏ฝž21:00 ไปŠๅพŒใฎใ‚ขใ‚ฏใ‚ทใƒงใƒณ\r\n+ ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n# Code for Japan SUMMIT ใซใคใ„ใฆ\r\n้™ฃๅ†…ใ•ใ‚“๏ผ†่ฅฟ่ฐทใ•ใ‚“\r\n\r\nๅŸบๆœฌ็š„ใซใฏใ‚นใƒฉใ‚คใƒ‰ใฎๅ†…ๅฎนใ‚’่ชฌๆ˜Ž <https://docs.google.com/presentation/d/1wMlsPY4-r0MvRh8EImiOu5SfWKRivGgMvYBXWmoZSBk/>\r\n\r\n## ใƒ†ใƒผใƒžๆคœ่จŽ\r\nๆๆกˆ๏ผšใ€Œใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใจใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ‡ใ‚ถใ‚คใƒณใ€\r\n\r\nใ—ใ‚ใ‚ใ›ใฎๆ‘ใฏใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ‡ใ‚ถใ‚คใƒณใซๅŠ›ใ‚’ๅ…ฅใ‚Œใฆใ„ใ‚‹ใ€‚\r\n\r\nๅ„ใƒ†ใƒผใƒ–ใƒซใซๅˆ†ใ‹ใ‚Œใฆใ€ใ‚คใƒใ‚ชใ‚ทใฎใƒ†ใƒผใƒžใ‚’่ฉฑใ—ๅˆใฃใฆๆกˆๅ‡บใ—ใ€‚ใƒ†ใƒผใƒ–ใƒซใ”ใจใซๅ‡บใฆใใŸๆกˆใฏ๏ผš\r\n\r\n- DESIGN4* : DESIGN FOR ASTERISK\r\n- (ใชใ—)\r\n- ใ—ใ‚ใ‚ใ›\r\n- PILOT\r\n- U-DESIGN : I ใ‚’ใƒใƒผใƒˆใ‚ฟใƒฏใƒผใซใ—ใฆใ‚‚ใ„ใ„ใชใ€‚U-COLOR\r\n- UNIVERSAL\r\n- BORDERLESS <=๏ผˆๆฑบๅฎš!๏ผ‰\r\n\r\n## 
ใ‚ปใƒƒใ‚ทใƒงใƒณๆคœ่จŽ\r\n\r\nใƒใƒฉใƒณใ‚นใฏๅพŒใง่ชฟๆ•ดใ™ใ‚‹ใจใ—ใฆใ€ใจใ‚Šใ‚ใˆใšๆ€ใ„ไป˜ใใƒ™ใƒผใ‚นใงใƒชใ‚นใƒˆๅŒ–ใ™ใ‚‹ๅคงไผš\r\n<https://docs.google.com/spreadsheets/d/1rEDzpqSOUJjEACBHeybWHVar9xevDuzH9ZuupbKL8wk/>\r\n\r\n- ใ‚ณใƒณใ‚ฟใ‚ฏใƒˆใƒชใ‚นใƒˆ\r\n- ใ‚ปใƒƒใ‚ทใƒงใƒณๆคœ่จŽ\r\n - ็†ๅŒ–ๅญฆ็ ”็ฉถๆ‰€ / ๅœฐๅŸŸใจใฎ้€ฃๆบ่ญฐ่ซ–ใ‚’C4JใจๆททใœใŸใ„๏ผˆ่–ฌๅธซๅฏบใ•ใ‚“๏ผ‰\r\n\r\n# ใใฎไป–ๅ‘Š็Ÿฅ\r\nๅ…ƒ็”บใƒญใƒœใƒƒใƒˆ๏ผ†Fabใ‚นใƒšใƒผใ‚น <https://www.facebook.com/events/1363944723652878/> ้–‹ใใพใ™๏ผ\r\n\r\n" }, { "alpha_fraction": 0.6937702298164368, "alphanum_fraction": 0.7342233061790466, "avg_line_length": 16.16176414489746, "blob_id": "ec5e8a250b1e95fd02773f349f39f4e27c2590ac", "content_id": "e8e319b923e6e9db97eac1aec03684eea9a5bf78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4576, "license_type": "no_license", "max_line_length": 122, "num_lines": 136, "path": "/_posts/2017-01-19-meeting24.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš24th\r\ndate: 2017-01-19 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ใ•ใ‚“ใ ไฟ่‚ฒๅœ’ใƒžใƒƒใƒ—ใ€COG(้ซ˜ๆฉ‹)\r\n (2)ใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒชใฎ้€ฒๆ—็Šถๆณ(ๅพŒ่—ค)\r\n (3)FabLab near ็ฅžๆˆธ็ดนไป‹(ๆ˜Ž็Ÿณ)\r\n (4)ใ‚ฟใ‚นใ‚ฏใƒ•ใ‚ฉใƒผใ‚นใ€CfJๆ–ฐไฝ“ๅˆถใชใฉ(ๆฆŠๅŽŸ)\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1757652324557946/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-24th-meeting--Ado3Zm_w2UQO_9rK34XHAmeuAQ-tbe1fnUQ0uVGshMIhNXYV)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“ใƒฌใƒใƒผใƒˆ](http://masaki-ravens.com/main/blog/everythingispractice/?p=1170)\r\n\r\nagenda:\r\n\r\n+ (1)ใ•ใ‚“ใ ไฟ่‚ฒๅœ’ใƒžใƒƒใƒ—ใ€COG(้ซ˜ๆฉ‹)\r\n+ (2)ใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒชใฎ้€ฒๆ—็Šถๆณ(ๅพŒ่—ค)\r\n+ (3)FabLab near ็ฅžๆˆธ็ดนไป‹(ๆ˜Ž็Ÿณ)\r\n+ 
(4)ใ‚ฟใ‚นใ‚ฏใƒ•ใ‚ฉใƒผใ‚นใ€CfJๆ–ฐไฝ“ๅˆถใชใฉ(ๆฆŠๅŽŸ)\r\n\r\n\r\n# ใ•ใ‚“ใ ไฟ่‚ฒๅœ’ใƒžใƒƒใƒ—ใ€COG\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\nCOG\r\n\r\n- ็ฅžๆˆธๅธ‚๏ผš้œ‡็ฝใฎ็”ปๅƒ\r\n - ใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ \r\n - ใ„ใ‚ใ„ใ‚ใชไบบใŒ่ฆ‹ใ‚Œใ‚‹ใ‚ˆใ†ใซ\r\n - ใ€Œ็ฎฑใ€ใŒใ‚ใ‚Œใฐใ„ใ‚ใ„ใ‚ๅ…ฅใ‚Œใ‚‹ใ“ใจใŒใงใใ‚‹\r\n- ไธ‰็”ฐๅธ‚๏ผš้˜ฒ็ฝ\r\n - ๅธ‚ๆฐ‘ๅดใ‹ใ‚‰็Šถๆณใ‚’็™บไฟกใงใใ‚‹ใ‚ˆใ†ใซ\r\n - ใƒกใƒผใƒซใซGoogleFormใฟใŸใ„ใชใ‚‚ใฎใ‚’ใ„ใ‚Œใ‚‹\r\n- ไธ‰็”ฐๅธ‚๏ผšCitySales\r\n - ๅธ‚ๆฐ‘ใŒใพใกใซใคใ„ใฆ่€ƒใˆใ‚‹ๆฉŸไผšใ‚’ใคใใ‚ใ†\r\n - ใ‚ฒใƒผใƒ ใ‚’้€šใ˜ใฆ่ก—ใซใคใ„ใฆๅญฆใถใ€‚ไบˆ็ฎ—ใ‚’ไฝฟใˆใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใ€‚\r\n\r\nไธ‰็”ฐๅธ‚ ใ€Œไฟ่‚ฒๅœ’ใƒžใƒƒใƒ—๏ผˆCodeForHokkaido fork๏ผ‰ใ€\r\n\r\nhttps://mune0323.github.io/papamama-1\r\n\r\nใƒ‡ใƒผใ‚ฟใ‚’ๅ…ฅใ‚Œๆ›ฟใˆใ‚‹ใ ใ‘ใชใ‚‰ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ็Ÿฅใ‚‰ใชใใฆใ‚‚ใงใใ‚‹โ‡’ใ‚„ใฃใฆใฟใพใ—ใ‚‡ใ†\r\n\r\n- ใƒ‡ใƒผใ‚ฟใฎ้€š่จณ่€…ใŒๅฟ…่ฆ\r\n- ใƒ‡ใƒผใ‚ฟใฎใ‚ณใƒ”ใƒšใฏใงใใชใ„ใจใคใ‚‰ใ„\r\n- ใƒ‡ใƒผใ‚ฟใƒ™ใƒผใ‚น็š„ใช็ฎก็†ใŒๅฟ…่ฆ\r\n\r\n# ใ‚คใƒ™ใƒณใƒˆใ‚ขใƒ—ใƒชใฎ้€ฒๆ—็Šถๆณ\r\n\r\nๅพŒ่—คใ•ใ‚“\r\n\r\nใ‚คใƒ™ใƒณใƒˆใ‚ซใƒฌใƒณใƒ€ใƒผ\r\niCalendar ใƒ•ใ‚กใ‚คใƒซใฎใ‚คใƒ™ใƒณใƒˆใ‚’ๆบๅธฏใง่กจ็คบใ™ใ‚‹ใƒ‡ใƒข\r\n\r\n` codeforkobe-eventmap ` ใจใ„ใ†ใƒชใƒใ‚ธใƒˆใƒชใ€‚\r\n\r\n\r\n\r\n# FabLab near ็ฅžๆˆธ็ดนไป‹\r\n\r\nๆ˜Ž็Ÿณใ•ใ‚“\r\n\r\nใชใœใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใ‚’ใฏใ˜ใ‚ใ‚‹ไบบใŒๅข—ใˆใฆใ„ใ‚‹ใฎใ‹๏ผŸ\r\n\r\nใฟใชใ•ใ‚“ใƒใƒผใƒ‰ใชใ‚‚ใฎใฅใใ‚Šใ‚’ใ—ใฆใ„ใพใ™ใ‹๏ผŸ\r\n\r\nใ€Œใƒกใ‚คใ‚ซใƒผใƒ ใƒผใƒ–ใƒกใƒณใƒˆใ€ใจใฏ๏ผšใƒขใƒŽใฅใใ‚Šใฎใƒ‘ใƒผใ‚ฝใƒŠใƒซๅŒ–ใŒ่ตทใ“ใ‚‹ใ ใ‚ใ†ใจใ„ใ†่ฉฑใ€‚\r\n\r\n็ฅžๆˆธใงใฏ็››ใ‚ŠไธŠใŒใฃใฆใ„ใชใ„๏ผ๏ผŸ๏ผšๆ—ฅๆœฌๅ…จๅ›ฝใงใฎ่จญๅ‚™ใฎๅˆ†ๅธƒ\r\n\r\n่ชฒ้กŒ๏ผšไผๆฅญๅ‘ใ‘ใง้ซ˜ใ„ใ€‚ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใŒใชใ„๏ผˆๅ€‹ไบบๆดปๅ‹•๏ผ‰ใ€‚\r\n\r\nไบˆๆธฌ๏ผš\r\n\r\n- Fabๆ–ฝ่จญใ‚’ไฝœใ‚ŠใŸใ„ใจ่€ƒใˆใ‚‹ไบบใฏใ„ใ‚‹ใŒใ€ๅญค่ปๅฅฎ้—˜็Šถๆ…‹ใ€‚ๆœช้€ฃๆบใ€‚\r\n- ้–ขๅฟƒใฏใ‚ใฃใฆใ‚‚ๅ‹•ใๅ‡บใ™ไบบใŒใ„ใชใ„\r\n\r\nใ€ŒFab near Kobe ฮฒใ€ใ‚’ใฏใ˜ใ‚ใพใ—ใŸใ€‚ 
http://fablabkobe.strikingly.com/\r\n\r\nไปŠๅพŒ๏ผšใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใจใ—ใฆใฎFabLab Kobeใ‚’่€ƒใˆใฆใพใ™\r\n\r\n้–ข่ฅฟFabๆ–ฝ่จญใŒ้€ฃๅ‹•ใ—ใŸใ‚คใƒ™ใƒณใƒˆใ‚’้–‹ๅ‚ฌใ™ใ‚‹ใ‹ใ‚‚ใƒปใƒปใƒปใƒป\r\n\r\nhttps://www.facebook.com/groups/583975355114544/\r\n\r\n้•ท็”ฐใฎใฉใฎใธใ‚“๏ผŸ http://www.siete.jp/siete/Welcome.html ใ“ใ‚Œใ‹ใช\r\nใƒ•ใ‚กใƒ–ใƒฉใƒœๆ†ฒ็ซ ใ€€http://fablabjapan.org/fabcharter/\r\n\r\n\r\n# ใ‚ฟใ‚นใ‚ฏใƒ•ใ‚ฉใƒผใ‚นใ€CfJๆ–ฐไฝ“ๅˆถใชใฉ\r\n\r\nๆฆŠๅŽŸใ•ใ‚“\r\n\r\nใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟ้–ขไฟ‚่ฟ‘ๆณๅ ฑๅ‘Š\r\n\r\n็น‹ใŒใฃใฆใชใ„ใ‚“ใ ใ‘ใฉ็น‹ใŒใฃใฆใ‚‹ใ‚ˆใ†ใช็ทฉใ„ใคใชใŒใ‚Šใฎ็Šถๆ…‹ใ€‚\r\n\r\n## ใ€Œๅฎ˜ๆฐ‘ใƒ‡ใƒผใ‚ฟ้€ฃๆบๆณ•ๆกˆใ€\r\n\r\n้ƒฝ้“ๅบœ็œŒ๏ผš็พฉๅ‹™ใ€€ๅธ‚ๅŒบ็”บๆ‘๏ผšๅŠชๅŠ›็พฉๅ‹™\r\n\r\n## ๅœฐๅŸŸIOTๅฎŸ่ฃ…ๆŽจ้€ฒใ‚ฟใ‚นใ‚ฏใƒ•ใ‚ฉใƒผใ‚น / ๅœฐๅŸŸ่ณ‡ๆบๆดป็”จๅˆ†็ง‘ไผš\r\n\r\nhttp://www.soumu.go.jp/main_sosiki/kenkyu/chiiki_iot/index.html\r\n\r\nhttp://www.soumu.go.jp/main_content/000451591.pdf\r\n\r\nใ‚ทใ‚งใ‚ขใƒชใƒณใ‚ฐใ‚จใ‚ณใƒŽใƒŸใƒผใฎ่ฉฑใ‚‚ๅ‡บใฆใ„ใ‚‹ : http://www.soumu.go.jp/main_content/000444185.pdf\r\n\r\n่กŒๆ”ฟใ‚ตใ‚คใƒ‰ใ‚‚ๆ‰‹่งฆใ‚Šๆ„Ÿใ‚’ๆฑ‚ใ‚ใฆใ„ใ‚‹ใจใ“ใ‚ใ‚‚ใ‚ใ‚‹ใฎใงใ€ใ‚ขใ‚ฆใƒˆใƒ—ใƒƒใƒˆใฎๅฝขใซใชใฃใฆใ„ใ‚‹ใ‚‚ใฎใ‚’ไฝฟใฃใฆใ€ๆ„Ÿ่ฆšใŒๅ…ฑๆœ‰ใ•ใ‚Œใฆใ„ใใ‚ˆใ†ใซใชใ‚‹ใจใ„ใ„ใชใ€‚\r\n\r\n## Code for Japan ๆ–ฐไฝ“ๅˆถ\r\n\r\n- ้–ขใ•ใ‚“\r\n- ไธ‰ๆœฌใ•ใ‚“\r\n- ๅฐๆณ‰ใ•ใ‚“ CodeforShiogama \r\n- ่—คไบ•ใ•ใ‚“๏ผˆไผšๆดฅๅคงๅญฆ๏ผ‰\r\n\r\nใ—ใฃใ‹ใ‚Šใ—ใŸไบ‹ๅ‹™ๅฑ€ใŒใงใใ‚‹ใ“ใจใซใชใฃใŸใ€‚\r\n\r\nkickoff meeting ใง 3 ๅนดๅพŒใฉใ†ใชใ‚‹ในใใ‹ใ‚’ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณใ—ใŸใ€‚\r\n\r\nCODE FOR JAPAN SUMMIT 2017 ใ‚’็ฅžๆˆธ่ช˜่‡ดใ—ใŸใ„๏ผ\r\n\r\nใ€Œใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ‡ใ‚ถใ‚คใƒณร—ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ€ใฎๅฏ่ƒฝๆ€งใ‚’ๆŽขใ‚ŠใŸใ„ใ€‚\r\n\r\nใ—ใ‚ใ‚ใ›ใฎๆ‘ใฏๅ…ƒใ€…ๅ…ˆ่กŒ่ฉฆ้จ“ใ™ใ‚‹ใƒ•ใ‚ฃใƒผใƒซใƒ‰ใ ใฃใŸใ€‚ไพ‹ใˆใฐใ€Œ็‚นๅญ—ใƒ–ใƒญใƒƒใ‚ฏใ€ใพใงใƒžใƒƒใƒ”ใƒณใ‚ฐใ—ใฆใƒŠใƒ“ใ‚ฒใƒผใ‚ทใƒงใƒณใซๅˆฉ็”จใ—ใŸใ‚Š่ฉฆใ—ใฆใ„ใ‚‹ใ€‚ๆญฉใใ‚นใƒžใƒ›OKใ€‚\r\n\r\nใ—ใ‚ใ‚ใ›ใฎๆ‘ใง CODE FOR JAPAN SUMMIT ใ‚„ใ‚ŠใŸใ„ใช\r\n\r\n" 
}, { "alpha_fraction": 0.7551401853561401, "alphanum_fraction": 0.7551401853561401, "avg_line_length": 21.2608699798584, "blob_id": "ce50e92a362681034d6fb39582c8103a6febbfed", "content_id": "5f89e0e867bc986df198b638df6e84bc54edc4ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 991, "license_type": "no_license", "max_line_length": 93, "num_lines": 23, "path": "/README.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "About\r\n====\r\nใ“ใฎใƒชใƒใ‚ธใƒˆใƒชใฏWebใ‚ขใƒ—ใƒชใ‚’ใƒ›ใ‚นใƒˆใ™ใ‚‹ใŸใ‚ใซใ‚ใ‚Šใพใ™ใ€‚\r\nhttp://codeforkobe.github.io/\r\n\r\nใพใŸ่ญฐไบ‹้Œฒใชใฉใฎไฟ็ฎก็”จใซ[Wiki](https://github.com/codeforkobe/codeforkobe.github.io/wiki)ใจใ—ใฆใ‚‚ไฝฟใฃใฆใ„ใพใ™ใ€‚\r\n\r\nไธ€่ˆฌ็š„ใช[ๅ•ใ„ๅˆใ‚ใ›](https://github.com/codeforkobe/codeforkobe.github.io/issues)ใ‚‚ใ€ใ“ใกใ‚‰ใฎใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆๅฎ›ใงใŠ้ก˜ใ„ใ—ใพใ™ใ€‚\r\n\r\n\r\nใƒกใƒขใ‚’ๅ–ใ‚‹ไฝ“ๅˆถใฎๅค‰้ท\r\n====\r\n- ๅ€‹ไบบใƒกใƒข\r\n- ๅ€‹ไบบใƒกใƒขใ‚’ facebook ใงๅ…ฑๆœ‰\r\n- hackpad ใง่จ˜้Œฒใ—ใฆใ„ใŸ๏ผˆ่ชฐใงใ‚‚็ทจ้›†ใงใใ‚‹ไฝ“ๅˆถใซ็งป่กŒ๏ผ‰\r\n- hackpad ใŒ dropbox ใซ่ฒทๅŽใ•ใ‚ŒใŸ๏ผˆใ‚ขใ‚ซใ‚ฆใƒณใƒˆ็ตฑๅˆใงใ‚ดใ‚ฟใ‚ดใ‚ฟ๏ผ‰\r\n- dropbox ใง่จ˜้Œฒ\r\n - robots.txt ใงๆคœ็ดขใ‚จใƒณใ‚ธใƒณใซใ‚คใƒณใƒ‡ใƒƒใ‚ฏใ‚นใ•ใ‚Œใชใ„ๅ•้กŒ\r\n- hackmd ใซ็งป่กŒ\r\n- ๏ผˆ็พๅœจ๏ผ‰\r\n\r\nๆฎ‹่ชฒ้กŒ\r\n- github ใงใฎใƒใƒƒใ‚ฏใ‚ขใƒƒใƒ—ใ‚‚ใ—ใŸใ„ใ€‚ใใฎๅ ดๆ‰€ใซใ“ใฎใƒชใƒใ‚ธใƒˆใƒชใ‚’ไฝฟใ„ใŸใ„ใ€‚ใ„ใใคใ‹ใฎๅ›žใฏใงใใŸใ‘ใฉใ€ๆ‰‹ไฝœๆฅญใ ใจ่พ›ใ„ใ€‚\r\n" }, { "alpha_fraction": 0.6671270728111267, "alphanum_fraction": 0.7137430906295776, "avg_line_length": 18.820144653320312, "blob_id": "a99c9be423649d1b3b00b433b19ea8cdc79c057a", "content_id": "4e2852e3d0e77f0cfe89236dec947aae5bbb5edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5573, "license_type": "no_license", "max_line_length": 91, "num_lines": 139, "path": 
"/_posts/2017-08-17-meeting31.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš31st\r\ndate: 2017-08-17 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1) asobiๅŸบๅœฐ meets Code for Kobe๏ผๅฐ็ฌ ๅŽŸ (30ๅˆ†)\r\n (2) Gochiso meets Code for Kobe๏ผPhilip Nguyen (30ๅˆ†)\r\n (3) ่‰ๆดฅใฎ็ดนไป‹๏ผๅฅฅๆ‘ (5ๅˆ†)\r\n (4) ใ‚ปใƒŸใƒŠใƒผๆกˆๅ†…๏ผ่–ฌๅธซๅฏบ (5ๅˆ†)\r\n (5) Code for Japan Summit 2017๏ผ่ฅฟ่ฐท (15ๅˆ†)\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/488412461512444/)\r\n/ [PaperๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-31st-meeting-6o3XZItCRCiWv6F9zX7a8)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=3084)]\r\n\r\n\r\n1. ไผšๅ ด\r\n\r\n[ใ‚นใƒšใƒผใ‚นใ‚ขใƒซใƒ•ใ‚กไธ‰ๅฎฎ](http://www.spacealpha.jp/sannomiya/access.html)\r\n\r\n2. ใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (1) asobiๅŸบๅœฐ meets Code for Kobe๏ผๅฐ็ฌ ๅŽŸ (30ๅˆ†)\r\n- (2) Gochiso meets Code for Kobe๏ผPhilip Nguyen (30ๅˆ†)\r\n- (3) ่‰ๆดฅใฎ็ดนไป‹๏ผๅฅฅๆ‘ (5ๅˆ†)\r\n- (4) ใ‚ปใƒŸใƒŠใƒผๆกˆๅ†…๏ผ่–ฌๅธซๅฏบ (5ๅˆ†)\r\n- (5) Code for Japan Summit 2017๏ผ่ฅฟ่ฐท (15ๅˆ†)\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n3. 
ๅ‚ๅŠ ่ฒป\r\n\r\n1,000ๅ†† (ๅญฆ็”Ÿไปฅไธ‹็„กๆ–™)\r\n\r\n\r\n# asobiๅŸบๅœฐ meets Code for Kobe\r\n\r\nๅฐ็ฌ ๅŽŸใ•ใ‚“\r\n\r\n็‹ฌๅญฆใงไฟ่‚ฒๅฃซๅ›ฝๅฎถ่ณ‡ๆ ผๅ–ๅพ—ใ€4ๅนดใฎ็พๅ ด็ตŒ้จ“ใฎๅพŒใ€่ตทๆฅญใ€€2015๏ฝžๆง˜ใ€…ใชๅ ดๆ‰€ใงๆง˜ใ€…ใชใ‚คใƒ™ใƒณใƒˆ\r\n\r\n็ฅžๆˆธใ‚’้ธใ‚“ใ ็†็”ฑ๏ผšๆตทใƒปๅฑฑใŒใ‚ใ‚‹ใ€‚ๆญดๅฒใƒปๆ–‡ๅŒ–ใŒๆƒใฃใฆใ„ใ‚‹ใ€‚ๅญ่‚ฒใฆใซใ‚ˆใ„ใ€‚\r\n\r\n- ๆง˜ใ€…ใชๅŠ›๏ผˆใ‚ฏใƒชใ‚จใ‚คใƒ†ใ‚ฃใƒ–็ณปใจใ‹๏ผ‰ใ‚’ใคใ‘ใ‚‹ใซใฏใ€Œไฝ™็™ฝใ€ใŒๅฟ…่ฆใ€‚\r\n- ็พไปฃใฎๆง˜ใ€…ใชๅ•้กŒใซ็›ด้ขใ€‚\r\n - ใ“ใฉใ‚‚ใŸใกใฏใฉใ†ใชใฃใฆใ—ใพใ†ใฎใ‹๏ผŸ\r\n - ใฉใ†ใ—ใŸใ‚‰ใชใใชใ‚‹ใฎใ‹๏ผŸ\r\n - ๅนผ็จšๅœ’ใซใฏ่ฑŠๅฏŒใช็Ÿฅ่ฆ‹ใŒใ‚ใ‚‹ใฎใซใƒปใƒปใƒป\r\n - โ‡จไฟ่‚ฒๅฃซร—็คพไผšใƒ‡ใ‚ถใ‚คใƒณ\r\n\r\nasobiๅŸบๅœฐ๏ผˆใ‚คใƒ™ใƒณใƒˆ๏ผๅ…ฅใ‚Šๅฃใ‚’ใคใใฃใฆใ€ๅ‚ๅŠ ่€…๏ผˆๅญไพ›ใƒป่ฆช๏ผ‰ใฎ้–ขไฟ‚ๆ€งใ‚’ใƒ‡ใ‚ถใ‚คใƒณใ™ใ‚‹ใ€‚๏ผ‰\r\n\r\n- ่ฆชๅญใŒใƒชใƒ”ใƒผใƒˆใ—ใŸใใชใ‚‹ไป•ๆŽ›ใ‘๏ผˆ็‰นใซ่ฆชใŒใคใชใŒใ‚‹ใ‚ˆใ†ใซ๏ผ‰\r\n- ๅฎถใงไฝฟใˆใ‚‹ใƒขใƒŽใง้Šในใ‚‹ใƒ—ใƒญใ‚ฐใƒฉใƒ ใ‚’๏ผˆๅฎถใซๆŒใฃใฆๅธฐใฃใฆใ€็ถšใใง้Šในใ‚‹๏ผ‰\r\n- ใƒซใƒผใƒซใ‚’ๆฑบใ‚ใ‚‹ใ€‚๏ผˆๅญไพ›ใ‚‚่ฆชใ‚‚ใ™ในใฆใฎไบบใŒFair๏ผ‰\r\n\r\nasobiๅŸบๅœฐใฏใƒœใƒฉใƒณใƒ†ใ‚ฃใ‚ขใงใ‚„ใฃใฆใ„ใ‚‹\r\n\r\nasobiๅŸบๅœฐไปฅๅค–ใฎๆดปๅ‹•\r\n\r\n- ใ“ใฉใ‚‚ๆœชๆฅๆŽขๆฑ‚็คพใ€€http://kodomo-mirai-tankyu.com/\r\n- ใŠใ‚„ใ“ไฟ่‚ฒๅœ’๏ผˆ่ฆชใธใฎๅญ่‚ฒใฆๆ•™่‚ฒใฎๅ ด๏ผ‰ใ€€\r\n- ไฟ่‚ฒๅฃซใƒžใƒผใ‚ฏ\r\n - ใ‚‚ใฃใจใ‚‚ใฃใจ็คพไผšใงๅฝนใซ็ซ‹ใฆใ‚‹ไบ‹ใŒใ‚ใ‚‹ใฎใงใฏใชใ„ใ‹ใ€‚\r\n - ITใ‚’ๆดป็”จใ—ใฆใ†ใพใใคใชใ’ใฆใ„ใใŸใ„ใ€‚\r\n- ใƒŸใƒ‹ใƒ•ใƒฅใƒผใƒใƒฃใƒผใ‚ทใƒ†ใ‚ฃใ€€https://vimeo.com/141034483๏ผˆๆฌกๅ›žๆœชๅฎš๏ผ‰\r\n\r\n\r\n\r\n# Gochiso meets Code for Kobe๏ผPhilip Nguyen\r\n\r\nPhilip Nguyen\r\n\r\n- ไบฌ้ƒฝๅคงๅญฆๅ’ๆฅญๅพŒใ€็ฅžๆˆธใ‚นใ‚ฟใƒผใƒˆใ‚ขใƒƒใƒ—gatewayใซๅ‚ๅŠ \r\n- https://gochiso.jp/\r\n- NPOใจใƒฌใ‚นใƒˆใƒฉใƒณใ‚’ใคใชใ’ใ‚‹ใ‚ตใƒผใƒ“ใ‚น\r\n- ใƒฌใ‚นใƒˆใƒฉใƒณใง้ฃŸไบ‹ใ‚’ใ™ใ‚‹ใ“ใจใงใ€ใใฎ้‡‘้กใฎไธ€้ƒจใŒNPOใซๅฏ„ไป˜ใ•ใ‚Œใ‚‹ใ‚ตใƒผใƒ“ใ‚น\r\n\r\n- 
ใƒฌใ‚นใƒˆใƒฉใƒณใฏๆš‡ใชๆ™‚้–“ๅธฏใซใŠๅฎขใ‚’ใฉใ†ใ„ใ‚Œใ‚‹ใ‹ใŒ่ชฒ้กŒใ€‚ใ‚ˆใใ‚ใ‚‹ใฎใฏใƒ‡ใ‚ฃใ‚นใ‚ซใ‚ฆใƒณใƒˆใ™ใ‚‹ใ€‚\r\n- ใŸใ ใ—ใ€ใƒ‡ใ‚ฃใ‚นใ‚ซใ‚ฆใƒณใƒˆใฏใ€ใƒฌใ‚นใƒˆใƒฉใƒณใฎใ‚คใƒกใƒผใ‚ธใ‚’ๆใชใ†ใ€‚ๅบƒใพใฃใฆใ„ใ‹ใชใ„ใ€ใ„ใ„ใŠๅฎขใŒใคใ‹ใชใ„ใ€‚โ‡จๅฏ„ไป˜ใชใ‚‰ใ€ใ‚คใƒกใƒผใ‚ธใ‚‚ใ‚ˆใใชใ‚Šใ€SNSใชใ‚“ใ‹ใง๏ผˆใ„ใ„ใ‚คใƒกใƒผใ‚ธใง๏ผ‰ๅบƒใพใ‚Šใ‚„ใ™ใ„ใ€‚\r\n- NPOใฎๆ”ฏๆด่€…ใŒใŠๅฎขใซใชใ‚‹๏ผ\r\n\r\n- Gochisoใฏ10%, NPOใฏ10%ใ€œ50%,ใƒฌใ‚นใƒˆใƒฉใƒณใฏ40%ใ€œ80%ใฎๅ–ใ‚Šๅˆ†\r\n- ใ‚ฏใƒฉใ‚ฆใƒ‰ใƒ•ใ‚กใƒณใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’ใ™ใ‚‹ใ‚ˆใ‚Šใ€ๆ™‚้–“ใ‚‚ๆ‰‹้–“ใ‚‚ใŠ้‡‘ใ‚‚ใ‹ใ‹ใ‚‰ใชใ„ใ€‚\r\n\r\n- โ€œ้ฃŸโ€ใ‚’้€šใ˜ใฆใ€NPOใฎๆ–‡ๅŒ–ใ‚’ๆ—ฅๆœฌใซๆ นไป˜ใ‹ใ›ใŸใ„ใ€‚\r\n- ๆ™ฎๆฎตใฎ็”Ÿๆดปใ‚’ใ—ใชใŒใ‚‰ๅฏ„ไป˜ใงใใ‚‹ใ‚ˆใ†ใชไป•็ต„ใฟใ‚’ใ€‚\r\n- ๅฏ„ไป˜ใฎไฝฟใ„้“ใ‚’ๆ˜Ž็ขบใซใ™ใ‚‹ใ€‚ๅฏ„ไป˜ใ—ใ‚„ใ™ใ„ไป•็ต„ใฟใซใ™ใ‚‹ใ€‚\r\n\r\n- ไปŠใฏๅคง้˜ชใจ็ฅžๆˆธใฎ๏ผ‘๏ผ—ใƒฌใ‚นใƒˆใƒฉใƒณใจๅฅ‘็ด„ใ€‚NPOใฏ๏ผ’๏ผ–ๅ›ฃไฝ“ใŒๅ‚ๅŠ ใ€‚\r\n- ๅฏ„ไป˜ๅ…ˆใฎไฟก้ ผๆ€งใฎๆ‹…ไฟใจใ—ใฆNPOใฎใƒŸใƒƒใ‚ทใƒงใƒณใจใใฎ้”ๆˆใฎใŸใ‚ใฎๆดปๅ‹•ใฏใ‚ฏใƒชใ‚ขใซใ—ใฆใŠใๅฟ…่ฆใŒใ‚ใ‚‹ใ€‚\r\n- ๆพๆ‘๏ผžๆ—ฅๆœฌใ ใจใ€NPOใฎไฟก้ ผๆ€งใจใ„ใ†ใ‚ˆใ‚Šใ€็Ÿฅใ‚Šๅˆใ„ใŒใ‚ตใƒใƒผใ‚ฟใƒผใซใชใ‚‹ใ“ใจใŒใปใจใ‚“ใฉใ€‚็Ÿฅใ‚Šๅˆใ„ใ‹ใ‚‰ใฎๅฏ„ไป˜ใƒขใƒ‡ใƒซใฎๆ–นใŒๆ—ฅๆœฌใซใฏๅˆใ†ใฎใงใฏใชใ„ใ‹๏ผŸ\r\n\r\n\r\n# Code For Kusatsuใฎ็ดนไป‹\r\n\r\nๅฅฅๆ‘ใ•ใ‚“\r\n\r\n- ๆฏŽๆœˆ็ฌฌ๏ผ“ๆฐดๆ›œๆ—ฅใซๆดปๅ‹•๏ผˆไปŠๆœˆใฏ๏ผ’๏ผ“ๆ—ฅใซ้–‹ๅ‚ฌ๏ผ‰\r\n- ไฝ•ใฎใ—ใŒใ‚‰ใฟใ‚‚ใชใๅ–‹ใ‚ŠใŸใ„ๆ–นใ€ๆฅใฆใใ ใ•ใ„๏ผ\r\n- ๅฎšไพ‹ไผšใซๅ‚ๅŠ ใ„ใŸใ ใ„ใŸๆ–นใฏ้ฃฒใฟไผšใซๅ‚ๅŠ ใงใใพใ™๏ผ\r\n\r\n\r\n\r\n# ใ‚ปใƒŸใƒŠใƒผๆกˆๅ†…๏ผˆๆ—ฅๆœฌไผๆฅญใฎๅฐ‘ใ—ๆœชๆฅใฎๅƒใๆ–น๏ผ‰\r\n\r\n่–ฌๅธซๅฏบใ•ใ‚“\r\n\r\nP&Gใฎๆฒณๅˆใ•ใ‚“ใจใ„ใ†ๆ–น๏ผˆใ‚ตใƒŸใƒƒใƒˆใฎใ‚ปใƒƒใ‚ทใƒงใƒณใซใ”็™ปๅฃ‡ใ„ใŸใ ใ๏ผ‰ใ‹ใ‚‰ใ”ๆกˆๅ†…\r\n\r\nhttps://biz-study.com/seminar/tsujino_paneldiscussion20170907_kobe/\r\n\r\n\r\n\r\n# Code for Japan Summit 2017\r\n\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\nๆœ€ๆ–ฐใฎ็Šถๆณใฎใ‚ทใ‚งใ‚ข\r\n- Code For Japan 
Summit้–ข้€ฃ\r\n - Code For Kobeๆž ๏ผˆๅˆๅฟƒ่€…ใฎใŸใ‚ใฎใ‚ณใƒผใƒ’ใƒผใƒ–ใƒฌใ‚คใ‚ฏ๏ผ‰ : ๅ‚ๅŠ ใ—ใŸใ‹ใ‚‰ใซใฏ่ชฐใ‹ใจใคใชใŒใฃใฆใปใ—ใ„ใ€‚\r\n - Code For Kobeใƒ–ใƒผใ‚น : ใƒ‘ใƒใƒซใ‚’ไฝœใฃใฆไธ€็ท’ใซๅ†™็œŸใ‚’ๆ’ฎใ‚Œใ‚‹ใ‚ˆใ†ใซใ€‚\r\nใ€€- ใ‚นใƒใƒณใ‚ตใƒผ\r\n - ไพ‹ๅนดใ‚ˆใ‚Šใฏ้›†ใพใฃใฆใ„ใ‚‹ใ€‚\r\n - ใ‚ฏใƒฉใ‚ฆใƒ‰ใƒ•ใ‚กใƒณใƒ‡ใ‚ฃใƒณใ‚ฐๅˆๆŒ‘ๆˆฆ๏ผˆ๏ผ—ไธ‡๏ผ˜ๅƒๅ††้›†ใพใฃใŸ๏ผ‰\r\n - ใ‚นใ‚ฟใƒƒใƒ•ๅ‹Ÿ้›†\r\n - ใ‚คใƒ™ใƒณใƒˆๆƒ…ๅ ฑใฎๆ‹กๆ•ฃใŠ้ก˜ใ„ใ—ใพใ™๏ผ\r\n - Code For Kobeใฎๅๅˆบใ‚’ไฝœใ‚ŠใŸใ„ไบบใ€ไธ€็ท’ใซไฝœใ‚Šใพใ—ใ‚‡ใ†ใ€‚\r\n\r\n- ๆฌกๅ›žใฎCode For Kobeๅฎšไพ‹ไผšใซใคใ„ใฆ\r\n - ๅฑ‹ๅฝข่ˆนใงใ‚„ใฃใฆใฟใŸใ„ใ€‚\r\n - ๏ผ‘๏ผ™ๆ™‚ใ‹ใ‚‰ใ‚นใ‚ฟใƒผใƒˆ\r\n - โ†’้€ฒใ‚ใ‚‹ๆ–นๅ‘ใง๏ผ\r\n\r\n" }, { "alpha_fraction": 0.6953248977661133, "alphanum_fraction": 0.7373217344284058, "avg_line_length": 18, "blob_id": "20427b3a4e864bcc73fa66d6e7830c91b00e7405", "content_id": "44f427a44496be920c746ec41045b3ac4bebb9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5022, "license_type": "no_license", "max_line_length": 122, "num_lines": 126, "path": "/_posts/2016-11-17-meeting22.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš22nd\r\ndate: 2016-11-17 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n โ‘ ใ€Œใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2016ใ€ใ‚ญใƒƒใ‚ฏใ‚ชใƒ•ๅ ฑๅ‘Š๏ผˆ้ซ˜ๆฉ‹ใƒปๅทไบ•ใƒป่ฅฟ่ฐท๏ผ‰\r\n โ‘กSocial Impact Bond๏ผˆๆ›ฝ็”ฐ๏ผ‰\r\n โ‘ขRails Girls Kobe็ดนไป‹ใจๆดปๅ‹•ๅ ฑๅ‘Š๏ผˆ็”ฐไธญ๏ผ‰\r\n โ‘ฃๅฟ˜ๅนดไผšใฎใ”ๆกˆๅ†…๏ผˆ่ฅฟ่ฐท๏ผ‰\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebookใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/1883857728512411/)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-22nd-meeting--AdqVkgrWwyyUCXmw9P_Op0IyAQ-aO18Ezthj1k8jpY6kAPHr)\r\n/ Links -\r\n\r\n+ ๏ฝžไนพๆฏ๏ฝž\r\n+ โ‘ 
ใ€Œใƒใƒฃใƒฌใƒณใ‚ธ๏ผ๏ผใ‚ชใƒผใƒ—ใƒณใ‚ฌใƒใƒŠใƒณใ‚น2016ใ€ใ‚ญใƒƒใ‚ฏใ‚ชใƒ•ๅ ฑๅ‘Š๏ผˆ้ซ˜ๆฉ‹ใƒปๅทไบ•ใƒป่ฅฟ่ฐท๏ผ‰\r\n+ โ‘กSocial Impact Bond๏ผˆๆ›ฝ็”ฐ๏ผ‰\r\n+ โ‘ขRails Girls Kobe็ดนไป‹ใจๆดปๅ‹•ๅ ฑๅ‘Š๏ผˆ็”ฐไธญ๏ผ‰\r\n+ โ‘ฃๅฟ˜ๅนดไผšใฎใ”ๆกˆๅ†…๏ผˆ่ฅฟ่ฐท๏ผ‰\r\n+ โ‘คใใฎไป–ๅ‹Ÿ้›†ไธญ๏ผ†่ชฟๆ•ดไธญ\r\n+ ๏ฝžไบคๆต๏ฝž\r\n\r\n# COG 2016\r\n้ซ˜ๆฉ‹ใ•ใ‚“\r\n\r\n็ฅžๆˆธๅธ‚ใจไธ‰็”ฐๅธ‚ใฎใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขๅ‡บใ—ใƒŸใƒผใƒ†ใ‚ฃใƒณใ‚ฐใ‚’ใ—ใพใ—ใŸใ€‚ใŠ้กŒใฏ็ฅžๆˆธๅธ‚๏ผ‘ใคใ€ไธ‰็”ฐๅธ‚๏ผ’ใคใ€‚\r\n<http://park.itc.u-tokyo.ac.jp/padit/cog2016/area/kinki.html#kobe-shi>\r\n\r\n็ฅžๆˆธๅธ‚ใฎๅ‹Ÿ้›†ๅ†…ๅฎนใ€Œ้œ‡็ฝ็”ปๅƒใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใจใ‚ขใƒ—ใƒชใ‚’ๆดป็”จใ—ใŸ็พๅœจๆ•™่‚ฒใฎไผๆ‰ฟใ€\r\nๅ†™็œŸใ‚’ไฝฟใฃใŸใกใ‚‡ใฃใจใ—ใŸใ‚ขใƒ—ใƒชใ‚’ไฝœใฃใฆใฟใ‚‹ใจใ‹ใ€้œ‡็ฝใซๅ‘ใ‘ใŸ็”Ÿๆดปใฎ่ฑ†็Ÿฅ่ญ˜ใจใ‹\r\n\r\nไธ‰็”ฐๅธ‚ๅ‹Ÿ้›†ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ข\r\n้ฟ้›ฃๆ‰€ใŒใจใฆใ‚‚้ ใ„\r\n้ฟ้›ฃๆ‰€ใซ้€ƒใ’ใ‚‹ใ‚ฒใƒผใƒ ใจใ‹๏ผŸ\r\n\r\nไธ‰็”ฐๅธ‚ๅ‹Ÿ้›†ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ข๏ผˆใ‚ทใƒ†ใ‚ฃใ‚ปใƒผใƒซใ‚น๏ผ‰\r\n\r\n2้€ฑใซไธ€ๅบฆใใ‚‰ใ„ใง้›†ใพใฃใฆ้€ฒใ‚ใ‚‰ใ‚ŒใŸใ‚‰ใ„ใ„ใ‹ใจใ€‚\r\nใƒกใƒณใƒใƒผ้šๆ™‚ๅ‹Ÿ้›†๏ผ\r\n\r\nsuggestions: \r\n\r\n- ๆฑๆ—ฅๆœฌๅคง้œ‡็ฝๅพŒใซ่ฃœๆญฃไบˆ็ฎ—ใงไฝ•ใ‹ใ‚„ใฃใฆใ„ใ‚‹ใฏใšใชใฎใงใ€ใใ‚Œใ‚’่ฆ‹ใฆใŠใ„ใŸใปใ†ใŒ่‰ฏใ•ใใ†ใ€‚\r\n- COG ใฎๅ‘Š็Ÿฅใฏใฉใ“ใง่กŒใ‚ใ‚Œใฆใ„ใ‚‹๏ผŸ\r\n- ็ฅžๆˆธๅคงๅญฆใŒใพใšใ‚„ใฃใฆใ„ใŸใ€‚\r\n- ่‡ชๆฒปไฝ“ใซใ‚ˆใฃใฆใฏ็›ดๆŽฅๅ‘Š็Ÿฅใ—ใฆใ„ใŸใ‚Šใ€‚\r\n- ๅฐผๅดŽใ‹ใ‚‰ไธ‰็”ฐใซ็งปๅ‹•ใ™ใ‚‹ไบบใฏ็ตๆง‹ใ„ใ‚‹ใ€‚\r\n- ไฝ็’ฐๅขƒๆ”นๅ–„ใŒๅคงใใช็†็”ฑใฎใ‚ˆใ†ใ ใ€‚\r\n\r\n# ใ‚ฝใƒผใ‚ทใƒฃใƒซใƒปใ‚คใƒณใƒ‘ใ‚ฏใƒˆใƒปใƒœใƒณใƒ‰\r\nๆ›ฝ็”ฐ ็ ”ไน‹ไป‹ใ•ใ‚“๏ผˆๅฐผๅดŽๅธ‚ไผ็”ป่ฒกๆ”ฟๅฑ€ๆ”ฟ็ญ–่ชฒ๏ผ‰\r\n\r\n## SIBใจใฏ\r\n\r\n้€šๅธธใฎ่กŒๆ”ฟใ‚ตใƒผใƒ“ใ‚นใฎๆฐ‘้–“ๅง”่จ—ใƒป่ฃœๅŠฉไบ‹ๆฅญ\r\n\r\n- ๆˆๆžœใฎๆœ‰็„กใซใ‹ใ‹ใ‚ใ‚‰ใšๆดปๅ‹•ใซใ‹ใ‹ใฃใŸ็ตŒ่ฒปใ‚’ๆ”ฏๆ‰•ใ†๏ผˆๅฑฅ่กŒ็ขบ่ช๏ผ‰\r\n- ใ‚ˆใ‚Šใ‚ˆใ„ใ‚‚ใฎใซใ™ใ‚‹ใจใ„ใ†ใ‚คใƒณใ‚ปใƒณใƒ†ใ‚ฃใƒ–ใŒๅƒใใซใใ„\r\n\r\nๅค–้ƒจ่ณ‡้‡‘ๆไพ›่€…ใ‚’ๅทปใ่พผใ‚“ใ ๆˆๆžœ้€ฃๅ‹•ๆ”ฏๆ‰•ใ„๏ผSIB\r\n\r\n- 
่ฉ•ไพกใฏ็ฌฌไธ‰่€…ใŒ่กŒใ†ใ“ใจใซใชใฃใฆใ„ใ‚‹\r\n\r\n## ๆ—ฅๆœฌใซใŠใ‘ใ‚‹ใƒ‘ใ‚คใƒญใƒƒใƒˆไบ‹ๆฅญใฎ็Šถๆณ\r\n\r\n็พๆ™‚็‚นใงใฏๆ—ฅๆœฌ่ฒกๅ›ฃใŒๅฎŸ้จ“็š„ใซๆŠ•่ณ‡ใ‚’ใ—ใฆๅฎŸ้จ“ใ—ใฆใ„ใ‚‹ใ€‚\r\n\r\nไบ‹ไพ‹๏ผ‘๏ผšๆจช้ ˆ่ณ€ๅธ‚\r\n\r\n- ไธ€่ˆฌ็คพๅ›ฃๆณ•ไบบใƒ™ใ‚ขใƒ›ใƒผใƒ— <http://barehope.org/>\r\n- ๅ…็ซฅ้คŠ่ญท๏ผˆ็‰นๅˆฅ้คŠๅญ็ธ็ต„๏ผ‰๏ผšไธปใซๆ–ฐ็”Ÿๅ…ใ‚’ๅฏพ่ฑกใซใ—ใŸใ€Œ็‰นๅˆฅ้คŠๅญ็ธ็ต„ใ€ใ‚’ๆŽจ้€ฒใ™ใ‚‹ใ“ใจใซใ‚ˆใ‚Šใ€ๅฎถๅบญ็š„็”จ่ชžใฎๅฎŸ็พใจใ€็คพไผš็š„็”จ่ชžใ‚ณใ‚นใƒˆใฎไฝŽๆธ›ใ‚’ๅ›ณใ‚‹\r\n- ็›ฎๆจ™ 4 ไปถใฎใ†ใกใ€6ไปถใฎใ‚ฑใƒผใ‚นใซๅฏพๅฟœใ— 3 ไปถใฎๆˆ็ซ‹\r\n\r\nไบ‹ไพ‹๏ผ’๏ผšๅฐผๅดŽๅธ‚\r\n\r\n- ๅผ•ใใ“ใ‚‚ใ‚Šใฎ็”Ÿๆดปไฟ่ญทไธ–ๅธฏใฎใ‚ขใ‚ฆใƒˆใƒชใƒผใƒ๏ผˆๅฐฑๅŠดๆ”ฏๆด๏ผ‰\r\n- 15-39ๆญณใฎๅฐฑๅŠดๅฏ่ƒฝใช่‹ฅ่€…\r\n\r\n- 200ๅใฎใ†ใก 6 ๅใฎๅฐฑๅŠดใจ 4 ๅใฎๅฐฑๅŠดๅฏ่ƒฝๆ€งๅ‘ไธŠใ‚’็›ฎๆŒ‡ใ™\r\n- ่‚ฒใฆไธŠใ’ใƒใƒƒใƒˆ <https://www.sodateage.net/>\r\n- case worker ใฎๆจฉ้™ใ‚‚ๅฟ…่ฆใชใฎใงใ€ๅฎ˜ๆฐ‘้€ฃๆบใฎๅฝขใซใชใ‚‹ใ€‚\r\n\r\nไบ‹ไพ‹๏ผ“๏ผš็ฆๅฒกๅธ‚\r\n\r\n- ใใ‚‚ใ‚“ <https://www.kumon-lt.co.jp/>\r\n- ่ช็Ÿฅ็—‡ๆ”นๅ–„ใ€ๅญฆ็ฟ’็™‚ๆณ•\r\n\r\n## SIB ใฎไปŠๅพŒใฎ่ชฒ้กŒ\r\n- ๅฎ˜ๆฐ‘้€ฃๆบใงๅ…ฌๅ…ฑใ‚ตใƒผใƒ“ใ‚นใฎๆไพ›ใ‚’่กŒใ†ใ“ใจ\r\n- ๆˆๆžœๆŒ‡ๆจ™ใ‚’้–‹็™บใ—ใ€่พž่กจใฎๆˆๆžœใ‚’ๅฏ่ฆ–ๅŒ–ใ™ใ‚‹ใ“ใจ\r\n- ๆˆๆžœๆŒ‡ๅ‘ใฎ่ณ‡้‡‘ใฎๆตใ‚Œใ‚’็”Ÿใ‚€ใ“ใจ๏ผˆใฉใ“ใฎ่ฒป็”จใŒๆตฎใ„ใŸใฎใ‹้€†็ฎ—ใ—ใซใใ„๏ผ‰\r\n\r\n- ๅœฐๆ–นๅ…ฌๅ…ฑๅ›ฃไฝ“ใชใ‚‰ใงใ‚ƒใฎๅฎŸๅ‹™ไธŠใฎ่ชฒ้กŒ : ๅนดๅบฆๅ˜ไฝใฎไบˆ็ฎ—ใ€่กจ็„กๅง”่จ—ใงใฎๆณ•ไปคไธŠใฎๅˆถ็ด„\r\n- ๅฎข่ฆณ็š„ใชไบ‹ๆฅญ่ฉ•ไพกๆ‰‹ๆณ•ใฎ็ขบ็ซ‹ใจไบ‹ๆฅญใฎๆŽก็ฎ—ๆ€ง : ใ‚จใƒ“ใƒ‡ใƒณใ‚นใจใชใ‚‹ใƒ‡ใƒผใ‚ฟใŒใ‚‚ใฃใจๅฟ…่ฆใ€‚ใƒ•ใ‚กใ‚คใƒŠใƒณใ‚นใฎๅฐ‚้–€ๅฎถใ‚‚ๅฟ…่ฆใ€‚\r\n- ๆ–ฐใ—ใ„ใ“ใจใธใฎๆŠตๆŠ—ๆ„Ÿ\r\n\r\n# Rails Girls Kobe ็ดนไป‹๏ผ†ๆดปๅ‹•ๅ ฑๅ‘Š\r\n็”ฐไธญ็พŽไฝณใ•ใ‚“\r\n\r\nRails Girls Kobe <http://railsgirls.com/kobe201611>\r\n\r\nRails girls ใจใฏโ€ฆใƒ•ใ‚ฃใƒณใƒฉใƒณใƒ‰็™บใ€‚[ใƒชใƒณใƒ€ใƒปใƒชใ‚ฆใ‚ซใ‚นใ•ใ‚“](http://www.shoeisha.co.jp/book/rubynobouken/)\r\nๆ—ฅๆœฌใงใฏ 2012ๅนดใ‹ใ‚‰27ๅ›ž้–‹ๅ‚ฌ\r\n\r\n- Rails Girls Kobe ๅˆๅ›žใฏ 6 
ๅใฎใ‚ฌใƒผใƒซใ‚บใŒๅ‚ๅŠ \r\n- ไบŒๅ›ž็›ฎใฏ 7 ๅใฎใ‚ฌใƒผใƒซใ‚บใŒๅ‚ๅŠ \r\n\r\n็ฅžๆˆธ Rails ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใธใฎๅ‚ๅŠ ใฎใใฃใ‹ใ‘ใซใชใ‚Œใฐ\r\n\r\n- [Rails Follow-up Kobe](https://rails-followup-kobe.doorkeeper.jp/)\r\n- [Kobe.rb](https://koberb.doorkeeper.jp/)\r\n\r\n- ใ‚ฌใƒผใƒซใ‚บๅ‹Ÿ้›†ไธญ๏ผˆๅ‚ๅŠ ่€…๏ผ‰\r\n- ไผšๅ ดใ‚‚ๆŽขใ—ไธญ\r\n- 20-30ๅใใ‚‰ใ„ใง\r\n\r\n- RESAS API\r\n- ไพ‹ใˆใฐใƒ™ใƒƒใƒ‰ใ‚ฟใ‚ฆใƒณใซไฝใ‚“ใงใ„ใ‚‹ไบบใฎ่ท็จฎๆง‹ๆˆใจใ‹ใฏ่ฆ‹ใˆใฆใ„ใชใ„\r\n- ๅœฐๆ–นใฎ้€ฃๅˆใง้ƒฝๅธ‚้ƒจใซใพใ•ใ‚‹ๅผทใฟ\r\n- ไป–ๅœฐๅŸŸใฎ่กŒๆ”ฟใฎ็Šถๆณใฏใ‚ˆใใ‚ใ‹ใ‚‰ใชใ„\r\n\r\n\r\n" }, { "alpha_fraction": 0.5838533639907837, "alphanum_fraction": 0.6914976835250854, "avg_line_length": 19.905982971191406, "blob_id": "9bdfaf41e499904e7857fc2b6e7d576b02302f7a", "content_id": "d541cb7f2293de8773cc0ccf48f09ef36b95261a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4456, "license_type": "no_license", "max_line_length": 236, "num_lines": 117, "path": "/_posts/2018-01-18-meeting35.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš35th\r\ndate: 2018-01-18 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)ใพใชใณใจๆดปๅ‹•็ดนไป‹(ไธญๅฑฑ)\r\n (2)ๅญฆไผšๅ‘Š็Ÿฅ(็•‘)10ๅˆ†\r\n (3)ไปŠๅนดใฎๆดปๅ‹•ใซใคใ„ใฆ(่ฅฟ่ฐท)\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/549151432091013/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/H1lAxlAVM)\r\n/ Links: [ๅพŒ่—คใ•ใ‚“](http://masaki-ravens.com/main/blog/everythingispractice/?p=4075)]\r\n\r\n\r\n# Code for Kobe 35th\r\n\r\nITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ŒCode for Kobeใ€ใฎ็ฌฌ35ๅ›žๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผ2018ๅนด็ฌฌ1ๅผพ๏ผๅˆใ‚ใพใ—ใฆใฎๆ–นใ€ใฉใชใŸใงใ‚‚ๅ‚ๅŠ ๆญ“่ฟŽใงใ™๏ผ\r\n\r\n1.ๅ ดๆ‰€\r\n\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ” 
<http://www.kigyoplaza-hyogo.jp/>\r\n\r\n2.ใŠๅ“ๆ›ธใ โ€ปๆ•ฌ็งฐ็•ฅ\r\n\r\n- ใ€œไนพๆฏใ€œ\r\n- (1)ใพใชใณใจๆดปๅ‹•็ดนไป‹(ไธญๅฑฑ)\r\n- (2)ๅญฆไผšๅ‘Š็Ÿฅ(็•‘)10ๅˆ†\r\n- (3)ไปŠๅนดใฎๆดปๅ‹•ใซใคใ„ใฆ(่ฅฟ่ฐท)\r\n- โ€ปใใฎไป–่ชฟๆ•ดไธญ๏ผ†้ฃ›ใณๅ…ฅใ‚Šใ‚‚ๆญ“่ฟŽ๏ผ\r\n- ใ€œใƒใƒƒใƒˆใƒฏใƒผใ‚ญใƒณใ‚ฐใ€œ\r\n\r\n\r\n## ใพใชใณใจ็ดนไป‹\r\nNPOๆณ•ไบบใพใชใณใจ ไธญๅฑฑใ•ใ‚“\r\n\r\n- [ๆ”พ่ชฒๅพŒๅญฆใณใ‚นใƒšใƒผใ‚นใ‚ขใ‚ทใ‚นใƒˆ](http://manabitomanabi.com/%E3%83%97%E3%83%AD%E3%82%B8%E3%82%A7%E3%82%AF%E3%83%88%E7%B4%B9%E4%BB%8B/%E6%94%BE%E8%AA%B2%E5%BE%8C%E5%AD%A6%E3%81%B3%E3%82%B9%E3%83%9A%E3%83%BC%E3%82%B9%E3%82%A2%E3%82%B7%E3%82%B9%E3%83%88/)\r\n - ไธญๅญฆ๏ฝž้ซ˜ๆ ก 18:30--20:30\r\n- [ๆ—ฅๆœฌ่ชžๆ•™ๅฎคใ ใ‚“ใ‚‰ใ‚“](http://manabitomanabi.com/%E3%83%97%E3%83%AD%E3%82%B8%E3%82%A7%E3%82%AF%E3%83%88%E7%B4%B9%E4%BB%8B/%E6%97%A5%E6%9C%AC%E8%AA%9E%E6%95%99%E5%AE%A4%E3%81%A0%E3%82%93%E3%82%89%E3%82%93/)\r\n - ๆ—ฅๆœฌไบบใฎๅคงๅญฆ็”ŸใŒๆ•™ใˆใ‚‹ๅฝขๅผใงใ€ใƒœใƒฉใƒณใ‚ฟใƒชใƒผใชๅฝขใซใชใฃใฆใ„ใ‚‹\r\n - ๅบงๅญฆใ‚ˆใ‚Šใ‚‚ไผš่ฉฑใ‚’ไธปไฝ“\r\n- [็ฅžๆˆธใ“ใฉใ‚‚ๆŽขๆคœ้šŠ](http://manabitomanabi.com/%E3%83%97%E3%83%AD%E3%82%B8%E3%82%A7%E3%82%AF%E3%83%88%E7%B4%B9%E4%BB%8B/%E7%A5%9E%E6%88%B8%E3%81%93%E3%81%A9%E3%82%82%E6%8E%A2%E9%99%BA%E9%9A%8A/)\r\n- [ๅŒ—้‡Žใใ‚“ๅฎถ](https://kitanokunchi.com/)๏ผˆๆฐ‘้–“ๅญฆ็ซฅไฟ่‚ฒๆ–ฝ่จญ๏ผ‰\r\n - ๅฐๅญฆๆ ก 1-3 ๅนด\r\n - ๆฐ‘้–“ใ€ๅฐ‘ไบบๆ•ฐใ ใ‹ใ‚‰ใ“ใๅ–ใ‚Š็ต„ใ‚ใ‚‹่ชฒ้กŒใ‚‚ๆ‰ฑใฃใฆใ„ใ‚‹\r\n\r\nใ€Œใ„ใพใ‚ใ‚‹ๅฝ“ใŸใ‚Šๅ‰ใ€ใงใฏใชใใ€Œใƒ’ใƒˆใจใ—ใฆใฎๅฝ“ใŸใ‚Šๅ‰ใ€ใ‚’ๅฑŠใ‘ใ‚‹\r\n\r\n่ชใ‚ใ‚‰ใ‚Œใ‚‹ใ“ใจใฎ้‡่ฆๆ€ง\r\n\r\nๅญฆใณใ‚’่‚ฒใฆใ‚‹่จ€่‘‰\r\n\r\nๅฉฆไบบไผšใƒป็คพไผš็ฆ็ฅ‰ๅ”่ญฐไผšใƒปๅญฆๆ กใฎๆ ก้•ทใชใฉใฎ็น‹ใŒใ‚Šใงๅ‹•ใ„ใฆใ„ใ‚‹ใ€‚\r\n\r\n\r\n## ๆƒ…ๅ ฑใ‚ณใƒŸใƒฅใƒ‹ใ‚ฑใƒผใ‚ทใƒงใƒณๅญฆไผš\r\n็•‘ๅ…ˆ็”Ÿ ๅคงๆ‰‹็”บๅคงๅญฆ ใ•ใใ‚‰ๅค™ๅทใ‚ญใƒฃใƒณใƒ‘ใ‚น\r\n\r\n็คพไผšไบบๅ‘ใ‘ ITC ใ‚’็”จใ„ใŸๆ•™่‚ฒ ้€šไฟกๆ•™่‚ฒ่ชฒ็จ‹ใฎ็ซ‹ใกไธŠใ’ใ‚’ใ‚„ใฃใฆใใŸ๏ผˆ็พ่ฒฌไปป่€…๏ผ‰\r\n่ˆน่ˆถใฎ้€šไฟกใ‚ทใ‚นใƒ†ใƒ ๏ผˆFURUNOใจๅ…ฑๅŒ็ ”็ฉถใ‚‚๏ผ‰\r\nๅœฐๅŸŸSNSใฎ็ 
”็ฉถ\r\n\r\nๆƒ…ๅ ฑใ‚ณใƒŸใƒฅใƒ‹ใ‚ฑใƒผใ‚ทใƒงใƒณๅญฆไผš ๅ…จๅ›ฝๅคงไผš\r\n<http://www.cis.gr.jp>\r\n- ใ‚ฐใƒฉใƒ•ใ‚ฃใƒƒใ‚ฏใƒ•ใ‚กใ‚ทใƒชใƒ†ใƒผใ‚ทใƒงใƒณใ‚’ๅ…จใ‚ปใƒƒใ‚ทใƒงใƒณใซไป˜ใ‘ใ‚‹ไบˆๅฎš\r\n- ใ€ŒๅœฐๅŸŸๅ…ฑๅ‰ตใจใ‚ณใƒŸใƒฅใƒ‹ใ‚ฑใƒผใ‚ทใƒงใƒณใ€\r\n\r\n## Urban Innovation Kobe\r\n่–ฌๅธซๅฏบใ•ใ‚“\r\n\r\nFinal pitch ใง่ฝ้ธ\r\n\r\nใใ‚Œใฏใใ‚Œใจใ—ใฆ\r\n\r\nๆถˆ้˜ฒ่ปŠใฎ้…่ปŠ็ตๅฑ€ใฉใ†ใชใฃใฆใ‚‹ใฎใ‹ใƒ‡ใƒผใ‚ฟใ‚’ใใกใ‚“ใจๅˆ†ๆžใ—ใŸใ„ใ‚ˆใญ\r\n\r\n\r\n\r\n## Code for Kobe\r\n\r\nfacebook ๅบƒๅ‘Šๆ‰“ใฃใฆใฟใพใ—ใŸ\r\n- Code for Japan ้™ฃๅ†…ใ•ใ‚“ใ‹ใ‚‰ใ€Œๅ›ฐใ‚Šใ”ใจๆ”ฏๆดใ—ใพใ™ใ‚ˆใ€ใง\r\n- ๅ‚ๅŠ ่€…ใ‚’ๅข—ใ‚„ใ—ใŸใ„ใจใ„ใ†ใฎใŒๅคšใ‹ใฃใŸ\r\n\r\nใŠ้‡‘ใฎ่ฉฑ\r\n- ๅ‚ๅŠ ่ฒปใงใ„ใกใŠใ†ใพใ‚ใฃใฆใ„ใ‚‹\r\n- ๅฎšไพ‹ไผšใงใฎ่ปฝ้ฃŸ่ฒปใงๆถˆ่ฒปใ—ใŸใ‚Šใ—ใฆใ„ใ‚‹ใ‘ใ‚Œใฉใ‚‚ใ€ๅ‚ๅŠ ใ‚’ๆŠผใ•ใšใซๆฅใฆใ„ใ‚‹ไบบใ‚‚ใ„ใฆใ€ๅฐ‘ใ—ใšใคไฝ™ใ‚Šๆฐ—ๅ‘ณใซใชใฃใฆใ„ใ‚‹\r\n- \r\n\r\nใ‚‚ใใ‚‚ใไผš๏ผˆใƒใƒƒใ‚ซใ‚ฝใƒณ๏ผ‰ใ—ใŸใ„๏ผš่ฅฟ่ฐทใ•ใ‚“\r\n- ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใชใฉ\r\n- ๅนณๆ—ฅๅคœใซ้›ฃใ—ใ„ไบบ๏ผšๅฅณๆ€งใจใ‹ใ‚‚ๆฅใ‚Œใ‚‹ๅ ดๆ‰€ใซใ—ใŸใ„\r\n- ใ‚‚ใใ‚‚ใไผš๏ผŸ\r\n\r\n## ใ‚ใ—ใŸใŒ / ใ‚ใ—ใ‚„ใŸใŒใ‚„ใ™๏ผš็ญ’ไบ•ใ•ใ‚“\r\n- ่Šฆๅฑ‹ใงใฎ้›†ใพใ‚‹ๅ ดๆ‰€ใ‚’ใฏใ˜ใ‚ใŸ\r\n- ใƒŽใƒณใ‚ธใƒฃใƒณใƒซใ€ๅœฐๅŸŸใ€ๆ–‡ๅŒ–ใชใฉใซใƒ•ใ‚ฉใƒผใ‚ซใ‚น\r\n- ใ‚ฒใ‚นใƒˆใฎใƒˆใƒผใ‚ฏใ‚’็จฎใซไบบใฎ็น‹ใŒใ‚Šใ‚’ไฝœใ‚‹ใฎใŒ็›ฎ็š„\r\n- <https://www.facebook.com/ashitaga/>\r\n\r\n## 3D work ไปถ:็ญ’ไบ•ใ•ใ‚“\r\n- ่Šฆๅฑ‹ๅธ‚ใฎๅบๅ†…ใฎๅ‹‰ๅผทไผš\r\n- ใ‚ฐใƒฉใƒฌใ‚ณใ€ใชใฉใƒใ‚ฟใซใ‚„ใฃใŸใ€‚CfKใจใƒใ‚ฟใง้€ฃๆบใ—ใŸใ„ใจใ‹๏ผŸ\r\n\r\n## ่Šฆๅฑ‹็‰ˆใƒ‡ใƒผใ‚ฟใ‚ขใ‚ซใƒ‡ใƒŸใƒผ๏ผš็ญ’ไบ•ใ•ใ‚“\r\n- ๅธ‚ๅทใ•ใ‚“ใ‚’ๆ‹›ใ„ใฆใ‚„ใฃใŸ\r\n- ไปŠๅพŒใ‚‚๏ผŸ\r\n\r\n## ็ฅžๆˆธใ‚ซใƒ•ใ‚งใƒปใ‚นใ‚ฃใƒผใƒ„ๆ•ฃ็ญ–ใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃ #01 ๏ผšๅ‚ใƒŽไธ‹ใ•ใ‚“\r\n<https://peatix.com/event/329116>\r\n- OSMใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃ\r\n- \r\n" }, { "alpha_fraction": 0.7228314876556396, "alphanum_fraction": 0.7627118825912476, "avg_line_length": 19.64748191833496, "blob_id": 
"391293695ae2ccd2b023f8e468a94636aebfe042", "content_id": "9e9918f244b2eff3d899a7d8155b1d433a7a2d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5851, "license_type": "no_license", "max_line_length": 106, "num_lines": 139, "path": "/_posts/2018-03-15-meeting37.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš37th\r\ndate: 2018-03-15 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1) World Data Viz ChallengeใจInternational Open Data Dayใฎๅ ฑๅ‘Š(ๅพŒ่—ค)10ๅˆ†\r\n (2)ใพใฃใดใinSUMAๅ‘Š็Ÿฅ(ๆœจๆˆธ)5ๅˆ†\r\n (3)ใ‚ขใƒกใƒชใ‚ซๅ‡บๅผตใƒปKobexBRAVEๅ ฑๅ‘Š๏ผ†ๅ‚ๅŠ ใƒใƒผใƒ ็ดนไป‹(่–ฌๅธซๅฏบ)30ๅˆ†\r\n โ‘ SF/SDๅ‡บๅผตใจKobexBRAVEๅ ฑๅ‘Š(่–ฌๅธซๅฏบ)\r\n ใƒปSFใจSDใซ่กŒใฃใฆๆฅใŸใฎใงใใฎๅ ฑๅ‘Š\r\n ใƒป็ฅžๆˆธๅธ‚ไธปๅ‚ฌใซใ‚ˆใ‚‹VCใจใ‚ขใ‚ฏใ‚ปใƒฉใƒฌใƒผใ‚ทใƒงใƒณใฎ้–‹ๅ‚ฌๅ ฑๅ‘Š\r\n โ‘กKobexBRAVEใ‚ˆใ‚Š2ใƒใƒผใƒ ็ดนไป‹\r\n ใƒปๆ ชๅผไผš็คพT-ICUใƒผ้ ้š”ICUใ‚ทใ‚นใƒ†ใƒ \r\n ใƒปBugMoใƒผ้ฃข้ค“ๅ•้กŒใฎ่งฃๆฑบใซๅ‘ใ‘ใฆ\r\n (4)NASAใƒใƒƒใ‚ซใ‚ฝใƒณใ‚ญใƒƒใ‚ฏใ‚ชใƒ•@078ใซๅ‘ใ‘ใŸใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—(้•ทไบ•ใƒปใจใ‚Šใ‚„ใพ)30ๅˆ†\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/984119391751264/)\r\n/ [HackMDๅŽŸ็จฟ](https://hackmd.io/s/B1gxEpDKM)\r\n/ Links:\r\n\r\n# Code for Kobe 37th\r\n\r\nhttps://www.facebook.com/events/172246683410369/\r\n\r\nITใง็ฅžๆˆธใ‚’ใ‚‚ใฃใจ็ด ๆ•ตใช่ก—ใซใ™ใ‚‹ใŸใ‚ๆดปๅ‹•ใ™ใ‚‹ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใ€ŒCode for Kobeใ€ใฎ็ฌฌ37ๅ›žๅฎšไพ‹ไผšใ‚’้–‹ๅ‚ฌใ—ใพใ™๏ผๅˆใ‚ใพใ—ใฆใฎๆ–นใ‚‚ใฉใชใŸใงใ‚‚ๅ‚ๅŠ ๆญ“่ฟŽใงใ™๏ผ\r\n\r\n1.ๅ ดๆ‰€\r\n่ตทๆฅญใƒ—ใƒฉใ‚ถใฒใ‚‡ใ†ใ”\r\n(http://www.kigyoplaza-hyogo.jp/)\r\nโ€ป19ๆ™‚ไปฅ้™ๅ…ฅ้คจ็ตŒ่ทฏใŒ่ค‡้›‘ใซใชใ‚Šใพใ™๏ผ้…ๅˆปใ•ใ‚Œใ‚‹ๆ–นใฏใƒกใƒƒใ‚ปใƒผใ‚ธ็ญ‰ใงใŠ็Ÿฅใ‚‰ใ›ใใ ใ•ใ„ใ€‚\r\n\r\n2.ใŠๅ“ๆ›ธใ(ๆ•ฌ็งฐ็•ฅ)\r\nใ€œไนพๆฏใ€œ\r\n(1) World Data Viz ChallengeใจInternational Open Data 
Dayใฎๅ ฑๅ‘Š(ๅพŒ่—ค)10ๅˆ†\r\n(2)ใพใฃใดใinSUMAๅ‘Š็Ÿฅ(ๆœจๆˆธ)5ๅˆ†\r\n(3)ใ‚ขใƒกใƒชใ‚ซๅ‡บๅผตใƒปKobexBRAVEๅ ฑๅ‘Š๏ผ†ๅ‚ๅŠ ใƒใƒผใƒ ็ดนไป‹(่–ฌๅธซๅฏบ)30ๅˆ†\r\nโ‘ SF/SDๅ‡บๅผตใจKobexBRAVEๅ ฑๅ‘Š(่–ฌๅธซๅฏบ)\r\nใƒปSFใจSDใซ่กŒใฃใฆๆฅใŸใฎใงใใฎๅ ฑๅ‘Š\r\nใƒป็ฅžๆˆธๅธ‚ไธปๅ‚ฌใซใ‚ˆใ‚‹VCใจใ‚ขใ‚ฏใ‚ปใƒฉใƒฌใƒผใ‚ทใƒงใƒณใฎ้–‹ๅ‚ฌๅ ฑๅ‘Š\r\nโ‘กKobexBRAVEใ‚ˆใ‚Š2ใƒใƒผใƒ ็ดนไป‹\r\nใƒปๆ ชๅผไผš็คพT-ICUใƒผ้ ้š”ICUใ‚ทใ‚นใƒ†ใƒ \r\nใƒปBugMoใƒผ้ฃข้ค“ๅ•้กŒใฎ่งฃๆฑบใซๅ‘ใ‘ใฆ\r\n(4)NASAใƒใƒƒใ‚ซใ‚ฝใƒณใ‚ญใƒƒใ‚ฏใ‚ชใƒ•@078ใซๅ‘ใ‘ใŸใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—(้•ทไบ•ใƒปใจใ‚Šใ‚„ใพ)30ๅˆ†\r\nโ€ปใ”ๅธŒๆœ›ใ‚ใ‚Œใฐ้šๆ™‚ๅ—ใ‘ไป˜ใ‘ใพใ™๏ผ\r\nใ€œไบคๆตใ€œ\r\n\r\n3.ๅ‚ๅŠ ่ฒป\r\n1,000ๅ††(ๅญฆ็”Ÿไปฅไธ‹็„กๆ–™)\r\n\r\n## World Data Viz ChallengeใจInternational Open Data Dayใฎๅ ฑๅ‘Š\r\nๅพŒ่—ค ใ•ใ‚“\r\n\r\nWDVC 2017 ็ฅžๆˆธใƒฉใ‚ฆใƒณใƒ‰(http://kobe-barcelona.net/#infoKOBE) ๅ‚ๅŠ ใ—ใพใ—ใŸ\r\n\r\n็ฅžๆˆธๅคงๅญฆใฎๅญฆ็”Ÿใฎใ‹ใŸใฎ็™บ่กจใ‚‚ใŸใใ•ใ‚“ใ‚ใ‚Šใพใ—ใŸใ€‚\r\n\r\nใ€ŒKobe Demographics APIใ€ใง็ฅžๆˆธๅธ‚ใฎๅœฐๅ›ณไธŠใงไบบๅฃใฎ็Šถๆ…‹ใ‚’ API ็š„ใซไฝฟใˆใ‚‹ใƒ—ใƒฉใƒƒใƒˆใƒ•ใ‚ฉใƒผใƒ ใ‚’ไฝœใ‚ใ†ใจใ—ใฆใ„ใŸใ‚Šใ—ใพใ—ใŸใ€‚\r\n\r\nใƒŸใ‚จใƒซใ‚ซใฎใ‹ใŸใฎ่ฌ›ๆผ”ใ‚‚ๅฐ่ฑก็š„ใงใ—ใŸใ€‚[่ญฐๅ“กใƒšใƒ‡ใ‚ฃใ‚ข](http://proto.japanchoice.jp/)\r\n\r\n### IODD\r\n็ฅžๆˆธๅธ‚่ญฐใฎใƒ‡ใƒผใ‚ฟใ‚’ WikiData ใซๅ…ฅๅŠ›ใ™ใ‚‹ใ€Œใ‚‚ใใ‚‚ใไผšใ€ใ‚’ใ‚„ใ‚Šใพใ—ใŸใ€‚ไปปๆœŸใ‚„ๆ‰€ๅฑžใชใฉใ‚’ใ‚ณใƒณใƒ”ใƒฅใƒผใ‚ฟใŒ่ชญใ‚ใ‚‹ๅฝขใฎใƒ‡ใƒผใ‚ฟใจใ—ใฆใ€ๆทกใ€…ใจๅ…ฅๅŠ›ใ—ใฆใฟใ‚‹ใ€‚\r\n\r\n## ใพใฃใดใin SUMAๅ‘Š็Ÿฅ\r\nๆœจๆˆธ ใ•ใ‚“\r\n\r\nใ€Œ้ ˆ็ฃจใƒฆใƒ‹ใƒใƒผใ‚ตใƒซใƒ“ใƒผใƒใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ๏ฝžใงใใชใ„ใ‚’ใงใใŸใซๅค‰ใˆใ‚‹ใ€ใ‚’ใ‚„ใฃใฆใ„ใพใ™ใ€‚ๆตทใซ่ชฐใงใ‚‚ใฏใ„ใ‚Œใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใ€‚ๆตทไปฅๅค–ใซใ‚‚ใ€็•‘ใ‚‚ๆœจ็™ปใ‚Šใ‚‚ใ€‚้šœ็ข่€…ใ ใ‹ใ‚‰XXใŒใงใใชใ„ใ€ใงใฏใชใใ€่‡ชๅˆ†ใŸใกใงใงใใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใ€‚\r\n\r\nใ€Œใƒžใƒƒใƒ—ใŒๆฌฒใ—ใ„ใ€ใ€Œใƒžใƒƒใƒ—ใŒใชใ„ใ€ใจใ„ใ†่ชฒ้กŒใŒใ‚ใ‚‹ใ€‚\r\n\r\nOSM ใ‚’ไฝฟใฃใŸใƒžใƒƒใƒ”ใƒณใ‚ฐใ‚’ใ—ใ‚ˆใ†ใจใ—ใฆใ„ใ‚‹ใ€‚4/1 
้–‹ๅ‚ฌไบˆๅฎšใ€‚\r\n\r\nใ€Œใƒใ‚ธใƒ†ใ‚ฃใƒ–ใ€ใชใƒžใƒƒใƒ”ใƒณใ‚ฐใ€‚ใƒ”ใ‚ฏใƒ‹ใƒƒใ‚ฏ\r\n\r\nใ€Œใƒใ‚ธใƒ†ใ‚ฃใƒ–ใ€ใŸใ‚‹ใจใ“ใ‚ใฏใ€ไพ‹ใˆใฐไธ€่ˆฌ็š„ใซใฏ้ฟใ‘ใฆ้€šใ‚‹ใƒใƒชใ‚ขใƒผใซใ€Œๅๅ‰ใ€ใ‚’ไป˜ใ‘ใฆใ€ใƒžใƒƒใƒ”ใƒณใ‚ฐใ‚’ใ—ใฆใฟใ‚‹ใ€‚\r\n\r\n[ใ‚†ใ‚‹ใ‚นใƒใƒผใƒ„](http://yurusports.com/) ใ‚‚ๅŒๆ™‚้–‹ๅ‚ฌใ‚‰ใ—ใ„ใ€‚\r\n\r\n\r\n## ใ‚ขใƒกใƒชใ‚ซๅ‡บๅผตใƒปKobexBRAVEๅ ฑๅ‘Š๏ผ†ๅ‚ๅŠ ใƒใƒผใƒ ็ดนไป‹\r\n่–ฌๅธซๅฏบ ใ•ใ‚“\r\n\r\n### SF/SDๅ‡บๅผตใจKobexBRAVEๅ ฑๅ‘Š(่–ฌๅธซๅฏบ)\r\nใƒปSFใจSDใซ่กŒใฃใฆๆฅใŸใฎใงใใฎๅ ฑๅ‘Š\r\nใƒป็ฅžๆˆธๅธ‚ไธปๅ‚ฌใซใ‚ˆใ‚‹VCใจใ‚ขใ‚ฏใ‚ปใƒฉใƒฌใƒผใ‚ทใƒงใƒณใฎ้–‹ๅ‚ฌๅ ฑๅ‘Š\r\n\r\n- CONNECT https://www.connect.org/\r\n- INDIE BIO https://indiebio.co/\r\n- JLABS https://jlabs.jnjinnovation.com/\r\n\r\nINDIE BIO ใงใฏใ€PoC ใ‚’ไฝœใฃใฆใ€ๆฌกใฎ round seed ใงๅ’ๆฅญใ—ใฆใ„ใใ‚ตใ‚คใ‚ฏใƒซใ€‚BIO-tech ใฎ startup ใง่จญๅ‚™ๆŠ•่ณ‡ใซๅ›žใ›ใชใ„ใจใ“ใ‚ใ‚‚ใ‚ใ‚‹ใฎใงใ€ใใ†ใ„ใฃใŸๆ–ฝ่จญใŒไฝฟใˆใ‚‹ๆบ–ๅ‚™ใŒใ•ใ‚Œใฆใ„ใ‚‹ใ€‚\r\n\r\nBRAVE x KOBE ใงใฏใ€ใ‚ขใ‚คใƒ‡ใ‚ฃใ‚ขใ‹ใ‚‰ใƒ”ใƒƒใƒใซ่€ใˆใ‚‹ใ‚‚ใฎใซใ™ใ‚‹ใƒ—ใƒญใ‚ฐใƒฉใƒ ใจใชใฃใฆใ„ใ‚‹ใ€‚\r\n\r\n### KobexBRAVEใ‚ˆใ‚Š2ใƒใƒผใƒ ็ดนไป‹\r\n#### BugMoใƒผ้ฃข้ค“ๅ•้กŒใฎ่งฃๆฑบใซๅ‘ใ‘ใฆ\r\n\r\nBugMo Bar ๏ผใ‚ณใ‚ชใƒญใ‚ฎใ‹ใ‚‰ไฝœใฃใŸใ‚ฟใƒณใƒ‘ใ‚ฏ่ณช\r\n\r\nๅฎถ็•œใฎ่‚‰ใ‚’ๅพ—ใ‚‹ใŸใ‚ใซใ€้€”ไธŠๅ›ฝใงๆฃฎๆž—ไผๆŽกใŒ่กŒใ‚ใ‚Œใฆใ„ใ‚‹ใ€‚\r\n้€”ไธŠๅ›ฝใงใฏ่‚‰ใŒ้ซ˜ไพกใ™ใŽใฆๆ‘‚ๅ–ๅ›ฐ้›ฃใช็พ็ŠถใŒใ‚ใ‚‹ใ€‚\r\n\r\n้€šๅนดใ‚ณใ‚ชใƒญใ‚ฎใฎ้คŠๆฎ–ๆŠ€่ก“ใฎ้–‹็™บใ€้ฃŸๅ“้–‹็™บใ‚’่กŒใฃใฆใ„ใ‚‹ใ€‚\r\n\r\n\r\n##### ๆ ชๅผไผš็คพT-ICUใƒผ้ ้š”ICUใ‚ทใ‚นใƒ†ใƒ \r\n\r\nTele-ICU ้ ้š”้›†ไธญๆฒป็™‚\r\n\r\nๆ•‘ๆ€ฅๅฎค ER ใจ้›†ไธญๆฒป็™‚ๅฎค ICU ใฎ้•ใ„๏ผšER ใฏใƒใ‚ฟใƒใ‚ฟใจๅ‡ฆ็ฝฎใ—ใฆใ„ใ‚‹ใ€‚ICU ใฏใใ“ใ‹ใ‚‰ๅ›žๅพฉใซๅ‘ใ‹ใฃใฆใ„ใๅ ดๆ‰€ใงใ€ใใ‚Œใปใฉใƒใ‚ฟใƒใ‚ฟใ—ใชใ„ใ€‚\r\n\r\n่จบ็™‚ใƒฌใƒ™ใƒซใฎๅทฎใŒใ‚ใ‚‹ใฎใŒๅ•้กŒใซใชใฃใฆใ„ใ‚‹ใ€‚\r\n300 ๅฎคใฏๅฐ‚้–€ๅŒปใŒใ„ใ‚‹ใŒใ€800 ๅฎคใฏใ‚ผใƒญใ€‚\r\n\r\nT-ICU ใŒๆ‚ฃ่€…ใฎๆƒ…ๅ ฑใฎใƒใƒ–ใจใ—ใฆๆฉŸ่ƒฝใ™ใ‚‹ใ€‚\r\n\r\nDtoD ใจ DtoP 
ใงใ„ใ†ใจใ€DtoD ใฎใƒ“ใ‚ธใƒใ‚นใƒขใƒ‡ใƒซใ€‚\r\n\r\n\r\n## NASAใƒใƒƒใ‚ซใ‚ฝใƒณใ‚ญใƒƒใ‚ฏใ‚ชใƒ•@078ใซๅ‘ใ‘ใŸใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\n้•ทไบ• ใ•ใ‚“ใ€ใจใ‚Šใ‚„ใพ ใ•ใ‚“\r\n\r\nNASA Space challenge hackathon\r\n\r\nNASA ๅ…ฌๅผใ‚คใƒ™ใƒณใƒˆใงใ€ไธŠไฝ 3 ไฝใพใงใฏๅฏฉๆŸปใŒๅ—ใ‘ใ‚‰ใ‚Œใ€global ใซๅ‹ใกๆŠœใใจ NASA ใซๆ‹›ๅพ…ใ•ใ‚Œใพใ™ใ€‚\r\n\r\nๆ˜จๅนด๏ผš69 ใ‚ซๅ›ฝ 197 ไผšๅ ด\r\n\r\n## ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ\r\nๅ €็”ฐใ•ใ‚“\r\n\r\nใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ใ‚’ใ‚„ใฃใฆใ„ใพใ™ใ€‚ๆ•™ๆ้–‹็™บใ‚‚ๅง‹ใ‚ใฆใ„ใพใ™ใ€‚\r\nใ‚คใƒ™ใƒณใƒˆใ‚„ใฃใฆใ„ใใŸใ„ใชใ€‚\r\n" }, { "alpha_fraction": 0.6497154831886292, "alphanum_fraction": 0.7215505242347717, "avg_line_length": 21.04918098449707, "blob_id": "4ce63f5f4299732ba88f146cfc6515b4d53a969f", "content_id": "00c969282b97a55531bc0a63080771b14ba6613d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5130, "license_type": "no_license", "max_line_length": 162, "num_lines": 122, "path": "/_posts/2017-05-18-meeting28.md", "repo_name": "codeforkobe/codeforkobe.github.io", "src_encoding": "UTF-8", "text": "---\r\nlayout: post\r\ntitle: Code for Kobeๅฎšไพ‹ไผš28th\r\ndate: 2017-05-18 19:00:00 +09:00\r\ncomments: true\r\ndescription: |\r\n (1)FabLab near ็ฅžๆˆธ๏ผˆๆฃฎๆœฌ๏ผ‰\r\n (2)ๅฒกๆœฌๅ•†ๅบ—่ก—ใƒ–ใƒฉใ‚คใƒณใƒ‰ใƒปใƒžใƒƒใƒ”ใƒณใ‚ฐใƒ‘ใƒผใƒ†ใ‚ฃ๏ผˆๅ–œๅคš๏ผ‰โ€ปๅ‡บๅธญใงใใ‚Œใฐโ†’ไปŠๅ›žใฏๆ–ญๅฟต\r\n (3)ใ‚ตใ‚คใƒใƒผใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒผใ‚ปใƒŸใƒŠใƒผin็ฅžๆˆธ๏ผˆ็Ÿณๆฉ‹๏ผ‰\r\n (4)Code for Japan Summit 2017 ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ๏ผˆ่ฅฟ่ฐทใƒป้–ขๆˆธใƒปๅทไบ•๏ผ‰\r\ncategory: log\r\nkeywords: \r\ntags:\r\n- log\r\n---\r\n\r\n[Facebook ใ‚คใƒ™ใƒณใƒˆใƒšใƒผใ‚ธ](https://www.facebook.com/events/328202314262554)\r\n/ [ๆ—งHackpadๅŽŸ็จฟ](https://paper.dropbox.com/doc/Code-for-Kobe-28th-meeting--Adq02I9qg56rRRxH9u0G6zRsAQ-YmtvOHIMsKLrEBorZSkVW)\r\n/ Links: -\r\n\r\nๅ ดๆ‰€:[ใ‚นใƒšใƒผใ‚นใ‚ขใƒซใƒ•ใ‚กไธ‰ๅฎฎ](http://www.spacealpha.jp/sannomiya/access.html)\r\n\r\n# FabLab near 
็ฅžๆˆธ\r\n\r\nๆฃฎๆœฌใ•ใ‚“\r\n\r\n## Fab ใจใฏ๏ผŸ\r\n\r\nfaburication = ไฝœใ‚‹ใ“ใจ\r\n\r\nfabulous = ใ‚นใƒใƒฉใ‚ทใ‚ค\r\n\r\nโ†’ๆฅฝใ—ใ„ใƒขใƒŽใฅใใ‚Šใ€ใ“ใจใฅใใ‚Šๅ…จ่ˆฌ\r\n\r\n## FAB ใฎใƒใƒƒใƒˆใƒฏใƒผใ‚ฏใ‚’็ซ‹ใกไธŠใ’ไธญ\r\n\r\nใฉใ“ใงไฝ•ใ‚’ใ‚„ใฃใฆใ„ใ‚‹ใฎใ‹ใ€ๆƒ…ๅ ฑใŒๆ‰‹ใซๅ…ฅใ‚‰ใชใ„็Šถๆ…‹ใซใชใฃใฆใ„ใ‚‹ใ€‚ใ“ใฎๅ•้กŒใ‚’่งฃๆถˆใ™ใ‚‹ใŸใ‚ใซใ€Facebook ใฎใƒšใƒผใ‚ธใ‚’็ซ‹ใกไธŠใ’ใฆใ„ใ‚‹ใ€‚\r\n\r\nFab ใƒใƒƒใƒˆ๏ผš <https://www.facebook.com/Fab%E3%83%8D%E3%83%83%E3%83%88-1214352645344197/>\r\n\r\n## ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆใฎใ”็ดนไป‹\r\n\r\nFab ใƒใƒƒใƒˆใฎไธญใง็ซ‹ใกไธŠใŒใฃใฆใ„ใ‚‹ใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ 2 ใค\r\n\r\n- ROBOCO ๏ผš <https://www.facebook.com/Roboco-with-Fab-1273017206151723/>\r\n - CAMPFIRE ใงใ‚ฏใƒฉใ‚ฆใƒ‰ใƒ•ใ‚กใƒณใƒ‡ใ‚ฃใƒณใ‚ฐๅฎŸๆ–ฝไธญ\r\n- ใƒ‘ใƒผใ‚ฝใƒŠใƒซ่ˆช็ฉบๆฉŸ้–‹็™บใƒ—ใƒญใ‚ธใ‚งใ‚ฏใƒˆ ๏ผš <https://www.facebook.com/PPKP.MRD/>\r\n - 8ๆœˆใ”ใ‚ใ‹ใ‚‰ใ‚ฏใƒฉใ‚ฆใƒ‰ใƒ•ใ‚กใƒณใƒ‡ใ‚ฃใƒณใ‚ฐๅฎŸๆ–ฝไบˆๅฎš\r\n - 11ๆœˆใ”ใ‚ใซๅˆ้ฃ›่กŒ็›ฎๆจ™\r\n - 1ๅฐ350ไธ‡ใใ‚‰ใ„๏ผŸ\r\n - ใ‚ชใƒผใ‚นใƒˆใƒชใ‚ข็”ฃใฎใ‚ธใ‚งใƒƒใƒˆใ‚จใƒณใ‚ธใƒณ\r\n - ๆŠ˜ใ‚Š็•ณใฟ็ฟผ\r\n - ่‡ชๅ‹•ๆ“็ธฆ\r\n\r\n# ใ‚ตใ‚คใƒใƒผใ‚ปใ‚ญใƒฅใƒชใƒ†ใ‚ฃใƒผใ‚ปใƒŸใƒŠใƒผin็ฅžๆˆธ\r\n็Ÿณๆฉ‹ใ•ใ‚“๏ผ ้–ข่ฅฟๆƒ…ๅ ฑใ‚ปใƒณใ‚ฟใƒผ\r\n\r\n6/30 ใจ 7/1 ใฎ 2 ใคใฎ้€ฃ็ถšใ—ใŸใ‚คใƒ™ใƒณใƒˆใ€‚6/30 ใฏไธ€่ˆฌๅ‘ใ‘ใ€‚7/1 ใฏๅญฆ็”Ÿๅ‘ใ‘ใง IPA ใŒใ‚„ใฃใฆใ„ใ‚‹ใ‚‚ใฎ๏ผˆใฎ็ฅžๆˆธ้–‹ๅ‚ฌ๏ผ‰ใ€‚\r\n\r\n- ไธ€่ˆฌ่ฌ›ๅบง 6/30\r\n - <https://secure.kiis.or.jp/cybersecurity/170630minicamp/>\r\n- ๅฐ‚้–€่ฌ›ๅบง 7/1\r\n - <http://www.security-camp.org/minicamp/kobe2017.html>\r\n\r\n# Code for Japan Summit 2017 ใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณ\r\n\r\n## Code for Kobe ใฎๆดปๅ‹•ใจ่ชฒ้กŒ\r\n่ฅฟ่ฐทใ•ใ‚“\r\n\r\n078kobe ใงใ—ใ‚ƒในใฃใŸใ“ใจใฎๅ†…ๅฎน\r\n\r\n- 2014 ๅนด 12 ๆœˆ่จญ็ซ‹\r\n\r\nCode for Kobeใฎ่ชฒ้กŒ\r\n\r\n- ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใจใ—ใฆใฏใ—ใฃใ‹ใ‚Š็ถ™็ถšใ—ใฆใ„ใ‚‹\r\n- ็†ฑ้‡ใ‚‚ใ‚ใ‚‹๏ผˆๆ‰“ใฆใฐ้Ÿฟใ๏ผ‰\r\n- ใใฎไธ€ๆ–นใงโ€ฆ\r\n- 
35๏ฝž45ๆญณ็”ทๆ€งใŒไธญๅฟƒใงใ€ๅฅณๆ€งใ€ใ‚ขใ‚ฏใƒ†ใ‚ฃใƒ–ใ‚ทใƒ‹ใ‚ขใ€่‹ฅ่€…ใŒๅฐ‘ใชใ„๏ผˆใใ‚Œใžใ‚Œ5%๏ผ‰\r\n- ๅฎŸใฏใ‚จใƒณใ‚ธใƒ‹ใ‚ขใ‚‚ๅฐ‘ใชใ„๏ผˆ็ด„20%๏ผ‰\r\n- ๅฎŸใฏใ‚ขใ‚ฆใƒˆใƒ—ใƒƒใƒˆใ ใฃใฆไนใ—ใ„\r\n- ใ‚‚ใ—ใ‹ใ—ใฆไธปไฝ“ๆ€งใŒ่ถณใ‚Šใชใ„๏ผ๏ผŸ\r\n\r\n## Code for Japan Summit 2017 ใ‚ปใƒƒใ‚ทใƒงใƒณใพใจใ‚ใƒใƒผใƒ \r\n้–ขๆˆธใ•ใ‚“\r\n\r\n- ไผšๅ ดๅœฐๅ›ณ่ชฌๆ˜Ž\r\n- ้ƒจๅฑ‹ๅ‰ฒใ‚Š\r\n - ใƒ›ใƒผใƒซ๏ผšใ‚ญใƒผใƒŽใƒผใƒˆใƒปใƒ–ใƒชใ‚ฒใƒผใƒ‰LTใƒปไฝ“้จ“ไผšๅ ดใƒปใƒ–ใƒผใ‚นๅ‡บๅฑ•็ญ‰\r\n - ๅคงไผš่ญฐๅฎค๏ผšใ‚ฐใƒฉใƒฌใ‚ณ้šŠๆŽงๅฎค๏ผŸ\r\n - ็ฌฌไธ€็ ”ไฟฎๅฎค๏ผšใ“ใฉใ‚‚ๅ‘ใ‘ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—ๅ ๆœ‰๏ผŸ\r\n - ็ฌฌไบŒ็ ”ไฟฎๅฎค๏ผšใ‚ปใƒƒใ‚ทใƒงใƒณใ‚นใƒšใƒผใ‚น\r\n - ็ฌฌไธ‰็ ”ไฟฎๅฎค๏ผšใ‚ปใƒƒใ‚ทใƒงใƒณใ‚นใƒšใƒผใ‚น\r\n\r\n## Summit 2017 ใ‚ปใƒƒใ‚ทใƒงใƒณไฟ‚ใ‚ˆใ‚Š\r\n\r\n<https://speakerdeck.com/kwi/code-for-japan-summit-2017-setusiyonxi-yori>\r\n\r\n<script async class=\"speakerdeck-embed\" data-id=\"370d0bf732ee43b9b85d1d7c63d6355b\" data-ratio=\"1.33333333333333\" src=\"//speakerdeck.com/assets/embed.js\"></script>\r\n\r\n# ใƒฉใ‚ฆใƒณใƒ‰ใƒ†ใƒผใƒ–ใƒซ\r\nใƒ†ใƒผใƒ–ใƒซใ”ใจใซใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณใ—ใฆใ€ไผ็”ปๆกˆใ‚’ๅ‡บใ™\r\n\r\n- ่กŒๆ”ฟใƒปๅธ‚ๆฐ‘ใƒปไผๆฅญใŒใ„ใฃใ—ใ‚‡ใซใชใฃใฆ่ฉฑใ›ใ‚‹ๅ ด\r\n - ใƒ‘ใƒใƒซใ‹ใƒฉใ‚ฆใƒณใƒ‰ใƒ†ใƒผใƒ–ใƒซ\r\n - ๅŒป็™‚ๅˆ†้‡Žใจใ‹๏ผŸ\r\n- ใƒใƒƒใ‚ซใ‚ฝใƒณ\r\n - ใƒ†ใƒผใƒžใ”ใจใซ NPO ใŒใƒ”ใƒƒใƒใ™ใ‚‹\r\n - CAMPFIRE ใซ็™ป้Œฒใ—ใฆใ„ใ‚‹็ฅžๆˆธใฎๅ›ฃไฝ“ใ‚’ๅ‘ผใณ่พผใ‚“ใงใ€ใƒ”ใƒƒใƒใ‚’ๅซใ‚ใฆใ‚„ใฃใฆใ‚‚ใ‚‰ใ†\r\n - ๅ›ฐใ‚Šใ”ใจใ‚’่งฃๆฑบใ™ใ‚‹ใƒใƒƒใ‚ซใ‚ฝใƒณ๏ผšๅฝ“ๆ—ฅๅ‡บใฆใใ‚‹ใจใ‚‚้™ใ‚‰ใชใ„ใฎใงใ€ใ“ใฎๅ ดใงๅŒๆ™‚ใซใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใƒ“ใƒซใƒ‡ใ‚ฃใƒณใ‚ฐใ—ใฆใ€ไปŠๅพŒ็ถ™็ถšใงใใ‚‹ใ‚ˆใ†ใชไป•็ต„ใฟใŒใงใใ‚‹ใจใ„ใ„ใช\r\n- ใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐ 3 ๅˆ†้–“๏ผˆใ‚ญใƒฅใƒผใƒ”ใƒผ3ๅˆ†ใ‚ฏใƒƒใ‚ญใƒณใ‚ฐใฎใƒŽใƒชใง๏ผ‰\r\n - ้€ฒ่กŒใฏๆ–™็†็•ช็ต„้ขจใง\r\n - ใ‚ชใƒผใƒ—ใƒณใƒ‡ใƒผใ‚ฟใ‚ขใƒ—ใƒชใฎใƒชใƒใƒผใ‚นใ‚จใƒณใ‚ธใƒ‹ใ‚ขใƒชใƒณใ‚ฐใจใ‹๏ผŸ\r\n - ใƒฏใƒผใ‚ฏใ‚ทใƒงใƒƒใƒ—\r\n- ่‡ชๆฒปไฝ“ใฎไบบใŒ้›†ใพใ‚‹ใจใ€ใ‚ขใƒซใ‚ขใƒซใƒใ‚ฟใง็››ใ‚ŠไธŠใŒใ‚‹\r\n - 
ๆŠ•็จฟใ•ใ‚ŒใŸใƒ„ใ‚คใƒƒใ‚ฟใƒผใ‚’ใƒ†ใƒผใƒžใง้›†ใ‚ใฆใ€่งฃๆžใ—ใฆ็œบใ‚ใ‚‹๏ผŸ\r\n - ใ‚ใ‚‹ใ„ใฏ google form ใงๅŒฟๅใงใ‚ขใƒซใ‚ขใƒซใƒใ‚ฟใ‚’้›†ใ‚ใฆใ€่‡ชๆฒปไฝ“ใง็พๅฎŸใซ่ตทใ“ใฃใฆใ„ใ‚‹๏ผˆๆ€’ใฃใฆใ„ใ‚‹๏ผ‰ใ“ใจใ‚’ใ€๏ผˆไบ‹ๅ‹™ๅฑ€ใงๅˆคๆ–ญใ—ใŸใ†ใˆใง๏ผ‰ๅ…ฌ้–‹ใงใใ‚‹ใ‚ˆใ†ใซใ—ใฆใฟใ‚‹ใ€‚\r\n- ใ€Œใฏใ˜ใ‚ใฆใฎไบบๅ…จๅ“ก้›†ๅˆใ€\r\n - ๅˆๅฟƒ่€…ใฎไบบใŒๆฐ—่ปฝใซใƒ‡ใ‚ฃใ‚นใ‚ซใƒƒใ‚ทใƒงใƒณใงใใ‚‹ใ‚ˆใ†ใซ\r\n- ใƒ˜ใƒซใ‚นใƒ‡ใƒผใ‚ฟ๏ผˆๅŒป็™‚็ณป๏ผŸ๏ผ‰\r\n - ็ฅžๆˆธๅธ‚ใƒปๅ…ตๅบซ็œŒใฎ่กŒๆ”ฟใฎไบบใ‚’ใพใใ“ใฟใคใค\r\n - ๆฐ‘้–“ใง่ˆˆๅ‘ณใŒใ‚ใ‚‹ไบบใ‚’้›†ใ‚ใฆ่ฉฑใ—ๅˆใˆใ‚‹ๅ ด\r\n" } ]
25
chuxiuhong/frequency-counter
https://github.com/chuxiuhong/frequency-counter
deaf4bb660db5617232d79350802bda1462c37b2
e5e4ea45c762064f32dcd20ea60efe977d88c438
41d8e1fe0281d384f5bd7edf30e2a0df52fdf05d
refs/heads/master
2018-01-03T05:38:02.129777
2016-09-28T02:33:21
2016-09-28T02:33:21
69,417,584
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5515759587287903, "alphanum_fraction": 0.573782205581665, "avg_line_length": 28.08333396911621, "blob_id": "00c939ac95df37dbda254ab070f38efe90fff25f", "content_id": "cc2d02cd8385bf3821de762dce8e2120676f4958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/signal.py", "repo_name": "chuxiuhong/frequency-counter", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport sys\nimport frequency_counter2\np = frequency_counter2.FrequencyCounter()\nclass UI(QDialog):\n def __init__(self, parent=None):\n super(UI, self).__init__(parent)\n self.txt = QLineEdit()\n self.bt_ad = QPushButton(u\"้€‰ๆ‹ฉ่ทฏๅพ„\")\n self.bt_a = QPushButton(u\"ๅˆ†ๆž\")\n self.txtout = QTextEdit() \n self.pic1 = QLabel()\n self.pic2 = QLabel()\n lay = QGridLayout()\n lay.addWidget(self.txt, 1, 1)\n lay.addWidget(self.bt_ad, 1, 2)\n lay.addWidget(self.bt_a, 2, 1, 1, 2)\n lay.addWidget(self.pic1, 4, 1, 3, 3)\n lay.addWidget(self.pic2, 7, 1, 3, 3)\n self.setLayout(lay)\n self.connect(self.bt_a, SIGNAL(\"clicked()\"), self.analy)\n self.connect(self.bt_ad, SIGNAL(\"clicked()\"), self.addr)\n\n def analy(self):\n #self.txtout.setText(\"test\")\n pix1 = QPixmap(\"wave.jpg\")\n pix2 = QPixmap(\"frequency.jpg\")\n self.pic1.setPixmap(pix1)\n self.pic2.setPixmap(pix2)\n\n def addr(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file')\n print fname\n p.loaddata(fname)\n p.fft()\n p.draw()\n self.txt.setText(fname)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ui = UI()\n ui.setWindowTitle(u\"้Ÿณ้ข‘ไฟกๅท้ข‘็އๅˆ†ๆž\")\n ui.show()\n app.exec_()\n" }, { "alpha_fraction": 0.5211693644523621, "alphanum_fraction": 0.538306474685669, "avg_line_length": 32.62711715698242, "blob_id": "5111b75e93580e0de322082a6c2feb65aa1f688d", "content_id": 
"aa03626ed33f54443f278e0724a6a4fc27602ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2012, "license_type": "no_license", "max_line_length": 79, "num_lines": 59, "path": "/frequency_counter2.py", "repo_name": "chuxiuhong/frequency-counter", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport numpy as np\nfrom scipy.io import wavfile\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n\nclass FrequencyCounter():\n def loaddata(self, filename):\n try:\n samplerate, channels = wavfile.read(filename)\n self.data = np.mean(channels, axis=1)\n except:\n raise ValueError, 'Data Error'\n\n def fft(self, windowsize=4096, samplerate=44100, overlapratio=0.5):\n try:\n self.res = plt.specgram(self.data,\n NFFT=windowsize,\n Fs=samplerate,\n window=mlab.window_hanning,\n noverlap=int(windowsize * overlapratio))[0]\n\n from numpy.core.umath_tests import inner1d\n for i in xrange(len(self.res)):\n self.res[i] = inner1d(self.res[i], self.res[i])\n #plt.plot([x for x in xrange(len(self.res))],self.res)\n except:\n raise ValueError, 'No Data for FFT'\n\n def mainfrequency(self):\n def compare(a, b):\n return int(a[0][0] < b[0][0])\n\n sortlist = [i for i in range(len(self.res))]\n for i in range(len(sortlist)):\n sortlist[i] = (self.res[i], i)\n sortlist.sort(lambda x, y: cmp(sum(x[0]), sum(y[0])))\n #for i in sortlist[:200]:\n #print i[1]\n return sortlist[:5]\n\n def draw(self):\n #plt.figure(figsize=(8,4))\n plt.plot([i for i in xrange(len(self.data))], self.data)\n plt.title(u'้Ÿณ้ข‘ไฟกๅทๆณขๅฝข',fontproperties='SimHei')\n #plt.show()\n plt.savefig('wave.jpg',dpi=70)\n plt.cla()\n plt.plot([i for i in xrange(len(self.res))], self.res)\n plt.title(u'้Ÿณ้ข‘ไฟกๅท้ข‘่ฐฑๅˆ†ๆž',fontproperties='SimHei')\n #plt.show()\n plt.savefig('frequency.jpg',dpi=70)\n\nif __name__ == '__main__':\n p = FrequencyCounter()\n p.loaddata('python-audio\\\\output2.wav')\n p.fft()\n p.draw()\n" } ]
2
mhwahdan/hill-cipher
https://github.com/mhwahdan/hill-cipher
f9e41c83ad92ef900841ac9e9e4c7e93460b139f
6820c53ca9b174fd206a2956ff9670a029e9cd00
a68c6aba289cc3152968d0e1d2a76a50761ffefc
refs/heads/main
2023-08-26T06:32:05.567149
2021-11-04T12:28:52
2021-11-04T12:28:52
424,587,647
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5886745452880859, "alphanum_fraction": 0.5961418747901917, "avg_line_length": 30.52941131591797, "blob_id": "93c03fbe95bf10af95b8445e77ccfdeabf225fc1", "content_id": "cc9f43df0df893974547e4dae03dc5c34bacc1a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1607, "license_type": "no_license", "max_line_length": 96, "num_lines": 51, "path": "/matrixhandler.py", "repo_name": "mhwahdan/hill-cipher", "src_encoding": "UTF-8", "text": "import numpy as np\n\nclass ModularMatrixHandler:\n def __init__(self, modulus):\n self.modulus = modulus\n\n def adjust_number(self, number):\n if number > 0:\n return int(number) % self.modulus\n else:\n if number < -1 * self.modulus:\n return int(number + (int((abs(number) / self.modulus)) + 1) * self.modulus)\n else:\n return self.modulus + int(number)\n\n def check_number(self, number):\n return number < 0 or number > self.modulus\n\n def find_multiplicative_inverse(self, determinant):\n for i in range(self.modulus):\n if (i * determinant) % self.modulus == 1:\n return i\n raise Exception(\"cannot find multiplicative inverse for the number \" + str(determinant))\n\n def has_multiplicative_inverse(self, matrix):\n determinant = self.determinant(matrix)\n for i in range(self.modulus):\n if (i * determinant) % self.modulus == 1:\n return True\n return False\n\n def determinant(self, matrix):\n determinant = np.linalg.det(matrix)\n return self.adjust_number(determinant)\n\n @staticmethod\n def matrix_cofactor(matrix):\n # return cofactor matrix of the given matrix\n matrix = np.transpose(matrix)\n return np.linalg.inv(matrix).T * np.linalg.det(matrix)\n\n @staticmethod\n def multiply(matrix1, matrix2):\n return np.matmul(matrix1, matrix2)\n\n @staticmethod\n def generate(rows, columns):\n return np.zeros(\n (rows, columns),\n dtype=np.int32\n )" }, { "alpha_fraction": 0.5747156143188477, "alphanum_fraction": 0.5811789035797119, "avg_line_length": 
39.29166793823242, "blob_id": "c476630fe36594b6686b53098c0ac59104094fef", "content_id": "0bbc2b2f4355fe318753f8e67f586ce6e1c47d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3868, "license_type": "no_license", "max_line_length": 114, "num_lines": 96, "path": "/main.py", "repo_name": "mhwahdan/hill-cipher", "src_encoding": "UTF-8", "text": "# This is a module for implementing hill cipher Algorithm\nfrom math import sqrt\nfrom matrixhandler import ModularMatrixHandler\n\n\nclass HillCipher:\n def __init__(self, key):\n self.matrixHandler = ModularMatrixHandler(27)\n key = key.replace(\" \", \"[\")\n if not (sqrt(len(key)).is_integer()):\n raise Exception(\"This string cannot be used as cipher as its length must have a perfect square root\")\n self.__complexity = int(sqrt(len(key)))\n self.__key = self.__generate_key(key)\n if self.matrixHandler.has_multiplicative_inverse(self.__key):\n self.__inverse_key = self.__generate_inverse_key()\n else:\n raise Exception(\"Key is invalid as its determinant does not have a 16 modular multiplicative inverse\")\n return\n\n def __generate_key(self, string):\n characters = list(string)\n integers = [(ord(c.upper()) - 65) for c in list(characters)]\n for x in integers:\n if self.matrixHandler.check_number(x):\n raise Exception(\"Only characters from a - z or A - Z or spaces are allowed\")\n matrix = ModularMatrixHandler.generate(self.__complexity, self.__complexity)\n iterator = 0\n for row in range(self.__complexity):\n for column in range(self.__complexity):\n matrix[row][column] = integers[iterator]\n iterator += 1\n return matrix\n\n def __generate_inverse_key(self):\n determinant = self.matrixHandler.determinant(self.__key)\n determinant = self.matrixHandler.adjust_number(determinant)\n transpose = self.matrixHandler.matrix_cofactor(self.__key)\n mul_inverse = self.matrixHandler.find_multiplicative_inverse(determinant)\n temp = mul_inverse * transpose\n output = 
ModularMatrixHandler.generate(self.__complexity, self.__complexity)\n for i in range(self.__complexity):\n for j in range(self.__complexity):\n output[i][j] = self.matrixHandler.adjust_number(int(temp[i][j]) + (1 if temp[i][j] >= 0 else -1))\n return output\n\n def __generate_substring_matrices(self, string):\n string = list(string.replace(\" \", \"[\"))\n string = [x.upper() for x in string]\n while True:\n if len(string) % self.__complexity == 0:\n break\n string = string + [\"[\"]\n strings = [string[i:i+self.__complexity] for i in range(0, len(string), self.__complexity)]\n matrices = []\n for x in strings:\n matrices.append(\n ModularMatrixHandler.generate(self.__complexity, 1)\n )\n for y in range(self.__complexity):\n matrices[-1][y][0] = ord(x[y]) - 65\n return matrices\n\n def __operate(self, text, key):\n matrices = self.__generate_substring_matrices(text)\n ciphers = []\n for x in matrices:\n ciphers.append(ModularMatrixHandler.multiply(key, x))\n x = len(ciphers)\n for i in range(x):\n for j in range(len(ciphers[i])):\n ciphers[i][j] = self.matrixHandler.adjust_number(ciphers[i][j])\n result = \"\"\n for x in ciphers:\n for y in x:\n result += chr(int(y[0]) + 65)\n return result\n\n def encrypt(self, text):\n return self.__operate(text, self.__key)\n\n def decrypt(self, cipher):\n string = self.__operate(cipher, self.__inverse_key).replace(\"[\", \" \").lower()\n string = string[::-1]\n counter = 0\n for x in string:\n if x != \" \":\n break\n counter += 1\n string = string[counter::]\n return string[::-1]\n\n\nif __name__ == \"__main__\":\n _cipher = HillCipher(\"hill\")\n print(_cipher.encrypt(\"retreat now\")) # expected output ==> QP[SBRRJOALP\n print(_cipher.decrypt(\"QP[SBRRJOALP\"))\n" } ]
2
ARAV0411/HackerRank-Solutions-in-Python
https://github.com/ARAV0411/HackerRank-Solutions-in-Python
e2717896721d03a3bc724660cdeb1ffc60d7d5bc
debcd474c0b94dc3854f39345501936a645572eb
ea11a69978dc46bb9b76bc46826d08a62213d98b
refs/heads/master
2022-04-25T20:20:14.714364
2019-10-13T18:48:13
2019-10-13T18:48:13
259,860,096
1
0
null
2020-04-29T07:51:01
2020-04-14T11:01:14
2020-04-24T10:58:36
null
[ { "alpha_fraction": 0.5137420892715454, "alphanum_fraction": 0.52008455991745, "avg_line_length": 25.352941513061523, "blob_id": "7c344ee8c7f9e7aeaf2114b038bafa25dfb79a0d", "content_id": "8e0c4e4c50aa6674cff34ddcd2871b1dafeab7a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/Algorithms Implementation Apple and Orange.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "s,t = raw_input().strip().split(' ')\r\ns,t = [int(s),int(t)]\r\na,b = raw_input().strip().split(' ')\r\na,b = [int(a),int(b)]\r\nm,n = raw_input().strip().split(' ')\r\nm,n = [int(m),int(n)]\r\napple = map(int,raw_input().strip().split(' '))\r\norange = map(int,raw_input().strip().split(' '))\r\napples=oranges=0\r\nfor i in apple:\r\n if (a+i)>=s and (a+i)<=t:\r\n apples+=1\r\nprint apples\r\nfor j in orange:\r\n if (b+j)>=s and (b+j)<=t:\r\n oranges+=1\r\nprint oranges \r\n" }, { "alpha_fraction": 0.5115089416503906, "alphanum_fraction": 0.5268542170524597, "avg_line_length": 22.266666412353516, "blob_id": "1ef950834c9ad3461acf71f751f86d96dfbc2b75", "content_id": "075d9b58d98dae28063a54dd86ed77f4b6ef5f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/Algorithms Implementation Repeated Strings.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "s = raw_input().strip()\r\nn = long(raw_input().strip())\r\nnumber=s.count('a')\r\nif number==0:\r\n print 0\r\nelif number==1 and len(s)==1:\r\n print n\r\nelse:\r\n #For Python2\r\n repeats=n/len(s) \r\n #For Python3\r\n #repeats=n//len(s) \r\n remainders=n%len(s)\r\n #s=s*repeats + s[:remainders]\r\n print str(number*repeats+s[:remainders].count('a')) \r\n \r\n \r\n" }, { 
"alpha_fraction": 0.6477272510528564, "alphanum_fraction": 0.6534090638160706, "avg_line_length": 33.20000076293945, "blob_id": "378d637e9462c73f52d3d03581637bca11f982ce", "content_id": "4518515c5e1e1d2ad875c59a49adf646302caab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 70, "num_lines": 5, "path": "/numpy Min, Max.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\nN,M = map(int, raw_input().split())\r\narray= numpy.array( [map(int, raw_input().split()) for i in range(N)])\r\nMin= numpy.min(array, axis=1)\r\nprint numpy.max(Min)\r\n" }, { "alpha_fraction": 0.551948070526123, "alphanum_fraction": 0.5649350881576538, "avg_line_length": 28, "blob_id": "8d72a0e05824be6709bb8672f5fb3f7ebd9f4c1f", "content_id": "4822111752c57c200fdb6f9b18dc452e76b48831", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Regex Validating and Parsing.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nimport re\r\n\r\nn=input()\r\nfor i in range(n):\r\n line=raw_input()\r\n name,email=line.split(' ')\r\n pattern=\"<[a-z][a-zA-Z0-9\\-\\.\\_]+@[a-zA-Z]+\\.[a-zA-Z]{1,3}>\"\r\n if bool(re.match(pattern,email)):\r\n print name,email\r\n " }, { "alpha_fraction": 0.6303317546844482, "alphanum_fraction": 0.6445497870445251, "avg_line_length": 24.375, "blob_id": "1a42495f7bbe9cb3609f5df74f949a1677a8c7d7", "content_id": "a133a968b9048f73e366b8b7b03848f75ba49bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/Algorithms Implementation PDF Design Viewer.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "h = map(int,raw_input().strip().split(' '))\r\nword = raw_input().strip()\r\nwidth= 1*len(word)\r\nwordHeights=[]\r\nfor w in word:\r\n wordHeights.append(h[ord(w)-97])\r\nheight= max(wordHeights)\r\nprint height * width\r\n" }, { "alpha_fraction": 0.5733113884925842, "alphanum_fraction": 0.5831960439682007, "avg_line_length": 22.73469352722168, "blob_id": "df746efefa6fdf0c414afbbce0e51da52467dbfd", "content_id": "31dd4e6c3afbe86c5280a163c148177bf0b4b752", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 122, "num_lines": 49, "path": "/Regex and Parsing Group, Groups and GroupDict.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import re\r\nexpression=r\"([a-zA-Z0-9])\\1+\"\r\nm=re.search(expression,raw_input())\r\nif m:\r\n print m.group(1)\r\nelse:\r\n print -1 \r\n \r\n \r\n \r\n'''\r\ngroup()\r\n\r\nA group() expression returns one or more subgroups of the match.\r\nCode\r\n\r\n>>> import re\r\n>>> m = re.match(r'(\\w+)@(\\w+)\\.(\\w+)','[email protected]')\r\n>>> m.group(0) # The entire match \r\n'[email 
protected]'\r\n>>> m.group(1) # The first parenthesized subgroup.\r\n'username'\r\n>>> m.group(2) # The second parenthesized subgroup.\r\n'hackerrank'\r\n>>> m.group(3) # The third parenthesized subgroup.\r\n'com'\r\n>>> m.group(1,2,3) # Multiple arguments give us a tuple.\r\n('username', 'hackerrank', 'com')\r\n\r\ngroups()\r\n\r\nA groups() expression returns a tuple containing all the subgroups of the match.\r\nCode\r\n\r\n>>> import re\r\n>>> m = re.match(r'(\\w+)@(\\w+)\\.(\\w+)','[email protected]')\r\n>>> m.groups()\r\n('username', 'hackerrank', 'com')\r\n\r\ngroupdict()\r\n\r\nA groupdict() expression returns a dictionary containing all the named subgroups of the match, keyed by the subgroup name.\r\nCode\r\n\r\n>>> m = re.match(r'(?P<user>\\w+)@(?P<website>\\w+)\\.(?P<extension>\\w+)','[email protected]')\r\n>>> m.groupdict()\r\n{'website': 'hackerrank', 'user': 'myname', 'extension': 'com'}\r\n\r\n''' " }, { "alpha_fraction": 0.6317135691642761, "alphanum_fraction": 0.644501268863678, "avg_line_length": 24.200000762939453, "blob_id": "ab99d7413a80fc5f34e9f63c843788fb79c78e95", "content_id": "cf2070e5dff626bcfc63dfcbbac149a4ed728f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/Day 5, Statistics Poisson Distribution.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Poisson Distribution \r\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nimport math\r\nmean,k=input(),input()\r\ndef factorial(n):\r\n if n==0 or n==1:\r\n return 1\r\n else:\r\n return n*factorial(n-1)\r\n\r\ndef poissonDistribution(mean,k):\r\n temp=((mean**k)*(math.e **(-mean)))/factorial(k)\r\n return temp\r\npd=poissonDistribution(mean,k)\r\nprint round(pd,3)" }, { "alpha_fraction": 0.4901960790157318, "alphanum_fraction": 0.5014005899429321, "avg_line_length": 25.384614944458008, "blob_id": "9ee61d15898b59bf952d6c99d4af59bc9e36ac55", "content_id": "88bb9b7135952c452d501f752e263850f13054b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/Collections Deque using eval.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom collections import *\r\nn= input()\r\nd= deque()\r\ncode= [raw_input().split() for i in range(n)]\r\nfor i in code:\r\n if len(i)> 1:\r\n val= i[-1]\r\n op= i[0] + \"(\" + val + \")\"\r\n eval('d.'+ op)\r\n else:\r\n eval('d.'+ i[0] +\"()\")\r\nprint \" \".join(map(str, d)) " }, { "alpha_fraction": 0.4182509481906891, "alphanum_fraction": 0.4904943108558655, "avg_line_length": 19.25, "blob_id": "cb363f57f4cdeae7e12fd097b78ad49d1dba2d25", "content_id": "68532fddd24373854db5aaac19e52a103cee4788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/Algorithms Implementation Kangaroo.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "x1,v1,x2,v2 = raw_input().strip().split(' ')\r\nx1,v1,x2,v2 = [int(x1),int(v1),int(x2),int(v2)]\r\n\r\nif v1>v2:\r\n posDiff=x2-x1 \r\n velDiff=v1-v2\r\n if posDiff%velDiff ==0:\r\n print \"YES\"\r\n else:\r\n print \"NO\" 
\r\nelse:\r\n print \"NO\" \r\n " }, { "alpha_fraction": 0.7394366264343262, "alphanum_fraction": 0.7394366264343262, "avg_line_length": 26.799999237060547, "blob_id": "a2e2db2bb9a6a7a5b634d9d8c0dd884e73bb47b9", "content_id": "3753f90c993fae6c30433cac3ebccfc3ca89eda6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/numpy floor, ceil and rint.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\narray= numpy.array( map(float, raw_input().split()))\r\nprint numpy.floor(array)\r\nprint numpy.ceil(array)\r\nprint numpy.rint(array)" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7235772609710693, "avg_line_length": 28.75, "blob_id": "72ab255c3abe0d96f6c9f2c8fbafbfd43ba1d11d", "content_id": "f8a93af55586c550f498af5365943c025ff18aea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/Day 5, Statistics Poisson Distribution 2.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Poisson Distribution\r\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\n\r\nmeanOfA,meanOfB=map(float,raw_input().split())\r\nCa=160+40*(meanOfA+meanOfA**2)\r\nCb=128+40*(meanOfB+meanOfB**2)\r\nprint round(Ca,3)\r\nprint round(Cb,3)\r\n" }, { "alpha_fraction": 0.4383358061313629, "alphanum_fraction": 0.48439821600914, "avg_line_length": 20.03333282470703, "blob_id": "b0d596b4396a03e05060c8c2ef3da02865ea5678", "content_id": "7ef0bd375517e455d52f377606483889d8387d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 64, "num_lines": 30, "path": "/Day 1,Statistics Inter Quartile Range.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "num=input()\r\nx=map(int,raw_input().split())\r\nf=map(int,raw_input().split())\r\ndata=[]\r\n\r\nfor i in range(num):\r\n data+=[x[i]]*f[i]\r\n \r\ndef median(values):\r\n n=len(values)\r\n if n%2==1:\r\n return sorted(values)[(n+1)/2 - 1]\r\n else:\r\n return round(sum(sorted(values)[(n/2)-1:(n/2)+1])/2.0,1)\r\n \r\ndef quartiles(values):\r\n n=len(values)\r\n values.sort()\r\n Q2=median(values)\r\n Q1=median(values[:n/2])\r\n \r\n if n%2==0:\r\n Q3=median(values[n/2:]) \r\n else:\r\n Q3=median(values[n/2+1:])\r\n \r\n return Q1,Q2,Q3\r\n \r\nQ1,Q2,Q3=quartiles(data)\r\nprint round(float(Q3-Q1),1) \r\n " }, { "alpha_fraction": 0.5982906222343445, "alphanum_fraction": 0.6149775981903076, "avg_line_length": 25, "blob_id": "336c29fc761062fb17c027ca463e34ef3dcc579d", "content_id": "225283347fa42a37f206c2bda944f503348e76a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2457, "license_type": "no_license", "max_line_length": 72, "num_lines": 91, "path": "/Regression.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import 
pylab\r\n\r\nvals=map(int,raw_input().split())\r\nf,n=vals[0],vals[1]\r\ndata=[]\r\nfactors=[]\r\nR=[]\r\npricePerSqFeet=[]\r\nresults=[]\r\n\r\n\r\nfor observations in range(n):\r\n data.append(map(float,raw_input().split()))\r\n pricePerSqFeet.append(data[observations][-1])\r\n \r\n result=1\r\n for factor in range(f):\r\n result*=data[observations][factor]\r\n factors.append(result) \r\n\r\ndef problemSolving(xValue):\r\n bestFit=R.index(max(R))\r\n if (bestFit+1)==1:\r\n ans=fit1(factors,pricePerSqFeet)\r\n print ans[0]*xValue+ans[1]\r\n \r\n elif (bestFit+1)==2:\r\n ans=fit2(factors,pricePerSqFeet)\r\n print ans[0]*(xValue**2)+ans[1]*xValue+ans[2]\r\n \r\n elif (bestFit+1)==3:\r\n ans=fit3(factors,pricePerSqFeet) \r\n print ans[0]*(xValue**3)+ans[1]*(xValue**2)+ans[2]*xValue+ans[3]\r\n return\r\n\r\n \r\n \r\n#print factors,pricePerSqFeet,data \r\n\r\ndef rSquare(measured, estimated):\r\n SEE = ((estimated - measured)**2).sum()\r\n mMean = sum(measured)/float(len(measured))\r\n MV = sum(((mMean - measured)**2))\r\n return 1 - SEE/MV\r\n \r\ndef fit1(factors,pricePerSqFeet):\r\n xVals,yVals=pylab.array(factors), pylab.array(pricePerSqFeet) \r\n a,b=pylab.polyfit(xVals,yVals,1)\r\n estVals=a*xVals+b\r\n results=[]\r\n results.append(a)\r\n results.append(b)\r\n R.append(rSquare(factors,estVals))\r\n return results\r\n \r\ndef fit2(factors,pricePerSqFeet):\r\n xVals,yVals=pylab.array(factors), pylab.array(pricePerSqFeet) \r\n a,b,c=pylab.polyfit(xVals,yVals,2)\r\n estVals=a*(xVals**2)+b*xVals+c\r\n results=[]\r\n results.append(a)\r\n results.append(b)\r\n results.append(c)\r\n R.append(rSquare(factors,estVals))\r\n return results \r\n\r\ndef fit3(factors,pricePerSqFeet):\r\n xVals,yVals=pylab.array(factors), pylab.array(pricePerSqFeet) \r\n a,b,c,d=pylab.polyfit(xVals,yVals,3)\r\n estVals=a*(xVals**3)+b*(xVals**2)+c*xVals+d\r\n results=[]\r\n results.append(a)\r\n results.append(b)\r\n results.append(c)\r\n results.append(d)\r\n 
R.append(rSquare(factors,estVals))\r\n return results \r\n\r\nfit1(factors,pricePerSqFeet)\r\nfit2(factors,pricePerSqFeet)\r\nfit3(factors,pricePerSqFeet)\r\n\r\nnewFactors=[] \r\nt=input()\r\nfor testCase in range(t):\r\n newFactors.append(map(float,raw_input().split()))\r\n result=1\r\n for factor in range(f):\r\n result*=newFactors[testCase][factor] \r\n \r\n problemSolving(result)\r\n" }, { "alpha_fraction": 0.49113476276397705, "alphanum_fraction": 0.5017730593681335, "avg_line_length": 21.559999465942383, "blob_id": "0585d919408e65baebfee72c28dbc9fb1f8af145", "content_id": "fb939f3eb1e048a7ee7b0b8e9de369f5d57707dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 76, "num_lines": 25, "path": "/Day 8: Least Square Regression Line.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "maths, stats = [],[]\n\nfor i in range(5):\n m, s= map(int,raw_input().split())\n maths.append(m)\n stats.append(s)\n\ndef b_value(x,y):\n n = len(x)\n xy =[x[i]*y[i] for i in range(n) ]\n x_square = [i**2 for i in x]\n # y_square = [i**2 for i in y]\n \n b = (n*(sum(xy))-((sum(x)*sum(y))))/float(((n*sum(x_square))-sum(x)**2))\n return b\n \ndef ab_values(x,y):\n x_mean = sum(x)/float(len(x)) \n y_mean = sum(y)/float(len(y))\n b = b_value(x,y)\n a = y_mean - b*x_mean\n return a,b\n \na,b = ab_values(maths,stats)\nprint a + b*80\n" }, { "alpha_fraction": 0.6907894611358643, "alphanum_fraction": 0.6907894611358643, "avg_line_length": 28.799999237060547, "blob_id": "77c5c2a205bd04f297941271f32fba778551f841", "content_id": "78bef31858881a8085471a85647d96b81c5547fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 44, "num_lines": 5, "path": "/numpy Inner Outer.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", 
"src_encoding": "UTF-8", "text": "import numpy\r\nA=numpy.array(map(int, raw_input().split()))\r\nB=numpy.array(map(int, raw_input().split()))\r\nprint numpy.inner(A,B)\r\nprint numpy.outer(A,B)" }, { "alpha_fraction": 0.5875706076622009, "alphanum_fraction": 0.6525423526763916, "avg_line_length": 33.599998474121094, "blob_id": "a5d127f4258439042179d25a68041734dcba0bda", "content_id": "cdbed267f1948403ca7e24a2a32986a962c066bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 80, "num_lines": 10, "path": "/Day 5, Statistics Normal Distribution 2.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import math\r\nmean,sd=map(float,raw_input().split())\r\nx,y=input(),input()\r\n\r\ndef normalDistribution(x, mean, sd):\r\n return round(0.5 * 100 * (1 + math.erf((x - mean)/ (sd * math.sqrt(2)))), 3)\r\n\r\nprint round(100 - normalDistribution(x, mean, sd), 2)\r\nprint round(100 - normalDistribution(y, mean, sd), 2)\r\nprint round(normalDistribution(60, 70, 10), 2)" }, { "alpha_fraction": 0.5724465847015381, "alphanum_fraction": 0.5748218297958374, "avg_line_length": 29.615385055541992, "blob_id": "1f195a45050270f4608b722356b481899767a809", "content_id": "e8395778b3d5a4143841c8deab6193facaff8eb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/Collections Most Common Moderate Problem.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom collections import *\r\ns= raw_input()\r\nstring= [i for i in s]\r\ncounter= Counter(string)\r\nstring= sorted(set(string))\r\nfor i in range(3):\r\n for j in string:\r\n if counter[j]== max(counter.values()):\r\n print j+\" \"+ str(max(counter.values()))\r\n counter.pop(j) \r\n break\r\n print counter\r\n \r\n" }, { "alpha_fraction": 0.64402174949646, "alphanum_fraction": 0.66847825050354, "avg_line_length": 39.11111068725586, "blob_id": "36105a7ef0561554e56634d10037eccdcc6dceb5", "content_id": "cfc4567bc6855b00b9c2ac09320c50cdeec40041", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 86, "num_lines": 9, "path": "/Day 6, Statistics Central Limit Theorem.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport math\r\nmaxWeight,n,mean,sd=input(),input(),input(),input()\r\ndef centralLimitTheorem(maxWeight,n,mean,sd):\r\n newMean=n*mean\r\n newSd=(n**0.5)*sd \r\n return round(0.5 * (1 + math.erf((maxWeight - newMean) / (newSd * (2 ** 0.5)))),4)\r\n\r\nprint centralLimitTheorem(maxWeight,n,mean,sd)" }, { "alpha_fraction": 0.6527131795883179, "alphanum_fraction": 0.6713178157806396, "avg_line_length": 22.5, "blob_id": "af776bb3a59d21c271c6311de092c1ab7dc0f01d", "content_id": "4d39b182f0808db0719dafadd2b6f80a83f961e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 69, "num_lines": 26, "path": "/Machine Learning Battery Life.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\n# Enter your code here\r\nimport pylab, sys\r\ndataFile=open('Battery.txt')\r\nbatteryCharged=[]\r\nbatteryLasted=[]\r\nfor i in dataFile:\r\n i=i.split(',')\r\n batteryCharged.append(float(i[0]))\r\n batteryLasted.append(float(i[1]))\r\npylab.plot(batteryCharged,batteryLasted,'ro')\r\npylab.show() \r\nbC=pylab.array(batteryCharged)\r\nbL=pylab.array(batteryLasted)\r\na,b=pylab.polyfit(bC,bL,1)\r\na,b=round(a,2) , round(b,2)\r\n#print a,b\r\n\r\n\r\ndata = float(sys.stdin.readline())\r\n\r\n# Enter your code here\r\nif data>0 and data<=4:\r\n print round(data*2,2)\r\nelse:\r\n print 8.00 \r\n\r\n \r\n" }, { "alpha_fraction": 0.637499988079071, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 30.399999618530273, "blob_id": "bef21c51af1f4b032886bb79731561a78a548e4e", "content_id": "5e974db7c2ce13847e209911a1c77a99c41975f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 69, "num_lines": 5, "path": "/Math Find Mod.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\na=input()\r\nb=input()\r\nans= divmod(a,b)\r\nprint str(ans[0])+\"\\n\"+ str(ans[1])+\"\\n\"+str(ans)" }, { "alpha_fraction": 0.6477272510528564, "alphanum_fraction": 0.6477272510528564, "avg_line_length": 27.33333396911621, "blob_id": "33f56541fb2833897c07b49c9bea439550b0ce96", "content_id": "b8f6e28ebfda0849fb3aa716f51e9fc2ca54149e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/Math Integers Come in all sizes.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\na=int(raw_input())\r\nb=int(raw_input())\r\nc=int(raw_input())\r\nd=int(raw_input())\r\nprint str( a**b + c**d)\r\n" }, { "alpha_fraction": 0.661596953868866, "alphanum_fraction": 0.6730037927627563, "avg_line_length": 27.44444465637207, "blob_id": "e07e4d9d15412f672f52e55bea9d5ec24eeecaf0", "content_id": "5e0b59060ef43ddef1eda98fe19acdfaa61b3fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 69, "num_lines": 9, "path": "/Regex and Parsing Findall.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport re\r\nexpression=r\"[^aeiouAEIOU]([aeiouAEIOU]{2,})[^aeiouAEIOU]\"\r\nresult=re.findall(expression,raw_input())\r\nif len(result)!=0:\r\n for i in result:\r\n print i\r\nelse:\r\n print -1" }, { "alpha_fraction": 0.443708598613739, "alphanum_fraction": 0.4503311216831207, "avg_line_length": 43.66666793823242, "blob_id": "fad4c8e9b1e3f676b642ff0a4010c8626c940fb1", "content_id": "d9c6d3c46d5b19d1f9dc8b62a090221c00b12aec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 70, "num_lines": 3, "path": "/Numpy Arrays.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\narr= numpy.array(map(float, raw_input().split()))\r\nprint arr[::-1] # reverse an array\r\n\r\n " }, { "alpha_fraction": 0.619684100151062, "alphanum_fraction": 0.6269744634628296, "avg_line_length": 25.884614944458008, "blob_id": "90055c54cbf415e57fe57074fa7b04430a0d7fc4", "content_id": "e3220ae095077195c87ebf7bc96b74913e622c6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 823, "license_type": "no_license", "max_line_length": 
77, "num_lines": 26, "path": "/Day 2, Statistics More Dice.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Dice Probability evenly weighed\r\nimport random\r\nimport itertools\r\ndice=[1,2,3,4,5,6]\r\nn=input(\"Please enter the number of Dices: \")\r\n\r\ndef diceRolls(dice):\r\n a=itertools.product(dice,dice)\r\n diceCombos=[i for i in a]\r\n return diceCombos\r\n\r\ndef sumProbability(rs,dices):\r\n requiredSum=rs\r\n result= [(i,j) for i,j in diceCombinations if i!=j and i+j==requiredSum ]\r\n print \"result =\"+str(result)\r\n return result\r\n \r\ndef findProbability(event,total):\r\n return float(event)/total\r\n \r\ndiceCombinations=diceRolls(dice)\r\nrs=input(\"Please enter required sum : \")\r\nevent=sumProbability(rs,diceCombinations)\r\neventLen=len(event)\r\ntotalLen=len(diceCombinations)\r\nprint findProbability(eventLen,totalLen)\r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.5806451439857483, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 28.66666603088379, "blob_id": "1464decbbe0c1eb43107b93ba26c7f7206dc9666", "content_id": "19cbbe5129be500ac8e385b0e5a94a918f68b01b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 64, "num_lines": 6, "path": "/Sets Superset.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "A,N= set(input().split()) , int(input())\r\nans=True\r\nfor i in range(N):\r\n check= set(input().split())\r\n ans= (ans) and ( A.issuperset(check)) and len(A)> len(check)\r\nprint (ans) " }, { "alpha_fraction": 0.39534884691238403, "alphanum_fraction": 0.43023255467414856, "avg_line_length": 19.08333396911621, "blob_id": "de81a57dc0a09588fecbdddd2bc580dd52881269", "content_id": "614cd4256487db87c8f51e75647d256efebba9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, 
"license_type": "no_license", "max_line_length": 37, "num_lines": 12, "path": "/Algorithms Implementation Save the Prisoner Long.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "t=input()\r\nfor testCases in range(t):\r\n vals=map(int,raw_input().split())\r\n n,m,s=vals[0],vals[1],vals[2]\r\n prisoners=range(1,n+1)\r\n while m!=1:\r\n if s==n:\r\n s=1\r\n else:\r\n s+=1\r\n m-=1\r\n print s \r\n " }, { "alpha_fraction": 0.576331377029419, "alphanum_fraction": 0.6142011880874634, "avg_line_length": 27.20689582824707, "blob_id": "45f7a2f81070c5d60f7b520b713fed672bbfa74b", "content_id": "656587fa5c6e115b36596a193e3bc7f39ffd591c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 113, "num_lines": 29, "path": "/Collections Named Tuple.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom collections import *\r\nN,students= input(), namedtuple('students',raw_input().split())\r\nstud= [students(*raw_input().split()) for i in range(N)] #passed string of input as a single key\r\nprint sum([float (i.MARKS) for i in stud]) /N\r\n\r\n \r\n''' \r\nCode 01\r\n\r\n>>> from collections import namedtuple\r\n>>> Point = namedtuple('Point','x,y')\r\n>>> pt1 = Point(1,2)\r\n>>> pt2 = Point(3,4)\r\n>>> dot_product = ( pt1.x * pt2.x ) +( pt1.y * pt2.y )\r\n>>> print dot_product\r\n11\r\nCode 02\r\n\r\n>>> from collections import namedtuple\r\n>>> Car = namedtuple('Car','Price Mileage Colour Class')\r\n>>> xyz = Car(Price = 100000, Mileage = 30, Colour = 'Cyan', Class = 'Y')\r\n>>> print xyz\r\nCar(Price=100000, Mileage=30, Colour='Cyan', Class='Y')\r\n>>> print xyz.Class\r\nY\r\n\r\n\r\n'''" }, { "alpha_fraction": 0.455089807510376, "alphanum_fraction": 0.46706587076187134, "avg_line_length": 23.384614944458008, "blob_id": "b9f49e7fc662e3eacc7b2440a93358a994216ce1", "content_id": "d559156e3b6d1c2e1cc9a6685ee29c4e83643917", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/Algorithms Implementation Angry Professor.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "t = int(raw_input().strip())\r\nfor a0 in xrange(t):\r\n n,k = raw_input().strip().split(' ')\r\n n,k = [int(n),int(k)]\r\n a = map(int,raw_input().strip().split(' '))\r\n count=0\r\n for arrivalTime in a:\r\n if arrivalTime<=0:\r\n count+=1\r\n if count>=k:\r\n print \"NO\"\r\n else:\r\n print \"YES\"\r\n " }, { "alpha_fraction": 0.4704519212245941, "alphanum_fraction": 0.4994206130504608, "avg_line_length": 23.727272033691406, "blob_id": "367d344cd2aeb7284d04e5d107768e0a83e304cf", "content_id": "cdbd373d1060c7e11c7a809ce7157a47889fca83", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 69, "num_lines": 33, "path": "/DataStructures Stack.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nn=input()\r\nclass Stack(object):\r\n def __init__(self):\r\n self.stack=[]\r\n self.maximum=-99999999\r\n \r\n def push(self,element):\r\n self.stack.append(element)\r\n if element>self.maximum:\r\n self.maximum=element\r\n \r\n def pop(self):\r\n if len(self.stack)!=0:\r\n self.stack.remove(self.stack[-1])\r\n self.maximum=max(self.stack)\r\n else:\r\n self.maximum=-99999999 \r\n \r\n def maximum(self):\r\n return self.maximum\r\n \r\ns=Stack() \r\nfor i in range(n):\r\n command=map(int,raw_input().split())\r\n if command[0]==1:\r\n s.push(command[1])\r\n \r\n elif command[0]==2:\r\n s.pop()\r\n \r\n elif command[0]==3:\r\n print s.maximum()\r\n \r\n " }, { "alpha_fraction": 0.6381322741508484, "alphanum_fraction": 0.6536964774131775, "avg_line_length": 40.66666793823242, "blob_id": "427601166ccc5624317e9fea05adc163236321ae", "content_id": "3b9c4753546d77f20ca089d9ec27b3da2c4eb04c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 71, "num_lines": 6, "path": "/Numpy Shapes.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\narr= numpy.array(map(int, raw_input().split())\r\nprint numpy.reshape(arr, (3,3)) # modifies the shape of the array\r\n\r\n#print arr.shape() --- prints the rows and columns of array in tuples\r\n# arr.shape()= (3,4) --- reshapes the array \r\n" }, { "alpha_fraction": 0.642201840877533, "alphanum_fraction": 0.6483180522918701, "avg_line_length": 26, "blob_id": "d1a9ddbc5fff6984ffd5bc03a061a585a27907ae", "content_id": 
"88d6ecc6fa702397e695b17c57a370479c266cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/itertools iterables.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom itertools import *\r\nN=input()\r\nalpha=raw_input().split()\r\nk=input()\r\npossibilities= list(combinations(sorted(alpha),k))\r\ncount=0\r\nfor i in possibilities:\r\n if 'a' in i:\r\n count+=1\r\nprint count / float(len(possibilities)) \r\n \r\n\r\n" }, { "alpha_fraction": 0.5674827098846436, "alphanum_fraction": 0.5806654095649719, "avg_line_length": 33.55813980102539, "blob_id": "0e2da805739883b22a299ed3416a2248f5000da4", "content_id": "0a44d76f85cb6f716aa1266ccac26e416b15eecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1593, "license_type": "no_license", "max_line_length": 85, "num_lines": 43, "path": "/Snakes&Ladder.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nT=input()\r\ndef gamePlay(dice,position,ladders,snakes):\r\n ladders.sort()\r\n snakes.sort()\r\n ladderLow=[ladders[i][0] for i in range(len(ladders))]\r\n snakesHead=[snakes[i][0] for i in range(len(snakes))]\r\n count=0\r\n while (position<=98):\r\n roll=max(dice)\r\n position+=roll\r\n if position in snakesHead and roll!=1:\r\n roll-=1\r\n if position in snakesHead and roll==1:\r\n position = snakes[snakesHead.index(position)][1] \r\n \r\n \r\n if position in ladderLow:\r\n position= ladders[ladderLow.index(position)][1]\r\n count+=1 \r\n print snakesHead\r\n print ladderLow\r\n print \"count =\"+str(count) \r\n print \"position=\"+str(position) \r\n print \"position=\"+str(position) \r\n return count \r\n \r\nfor testCase in range(T):\r\n N=input()\r\n ladders=[raw_input().split() for n in range(N)]\r\n M=input()\r\n snakes=[raw_input().split() for m in range(M)]\r\n position=1\r\n dice=[1,2,3,4,5,6]\r\n #Select the higher number\r\n #Check if there's a snake head\r\n #if there's a snake head go for the next highest number\r\n #if there's no snake head go for another roll and make a count of the step.\r\n #if there's a ladder bottom in the position, change the position and make a count\r\n #make sure the position reaches till 98.\r\n #steps=0\r\n maxCount=gamePlay(dice,position,ladders,snakes)\r\n print maxCount\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.43457943201065063, "alphanum_fraction": 0.44859811663627625, "avg_line_length": 22.52941131591797, "blob_id": "49ed7919c6537863d160890fb23e76f48091e0bc", "content_id": "85c1e75015c5b69e925db06594f5c9166b16b2ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/Regex Validating Phone Numbers.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import re\r\ndef 
phoneNumberVerification():\r\n n=input()\r\n pattern=\"^[789]\"\r\n for i in range(n):\r\n number=raw_input()\r\n if len(number)==10 and number.isdigit():\r\n ans=re.findall(pattern,number)\r\n if len(ans)==1:\r\n print \"YES\"\r\n else:\r\n print \"NO\" \r\n else:\r\n print \"NO\" \r\n\r\n\r\nphoneNumberVerification() " }, { "alpha_fraction": 0.6062802076339722, "alphanum_fraction": 0.6231883764266968, "avg_line_length": 27.571428298950195, "blob_id": "8e146b603b7be1e835aaeba62d966031d1f0397d", "content_id": "54116c811997f69c414e77cf40e36f58562a44cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 69, "num_lines": 14, "path": "/Collections Shoe Size Money problem.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom collections import *\r\nX=input()\r\nshoeSizes=map(int, raw_input().split())\r\nshoeSizes=Counter(shoeSizes)\r\nN= input()\r\ncustomer= [ map(int, raw_input().split()) for i in range(N)]\r\nmoney=0\r\nfor i in customer:\r\n if (i[0] in shoeSizes) and ( shoeSizes[i[0]] > 0) :\r\n money+=i[1]\r\n shoeSizes[i[0]]-=1 \r\n \r\nprint money\r\n" }, { "alpha_fraction": 0.6089385747909546, "alphanum_fraction": 0.6536312699317932, "avg_line_length": 23.285715103149414, "blob_id": "e373d035cb9b1303c8bdee20086845b7cb3d0d9c", "content_id": "597ee7daeb94f942c8a1892dd479ea4c4defcc44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/DataStructures Arrays Occurrence.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "Q1=input()\r\nlistQ1=[raw_input() for string in range(Q1)]\r\nQ2=input()\r\nlistQ2=[raw_input() for string in range(Q2)]\r\n\r\nfor string in 
listQ2:\r\n print listQ1.count(string) " }, { "alpha_fraction": 0.5391923785209656, "alphanum_fraction": 0.5581947565078735, "avg_line_length": 25.133333206176758, "blob_id": "87833fe3c8e57efd7b5a452e6b8998b4ae1e54c0", "content_id": "e0e3cca88dd00af16ec23ecbe2849e3ce0a77fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/Algorithms Implementation Beautiful Days.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nvals=raw_input().split()\r\ni,j,k=vals[0],vals[1],int(vals[2])\r\ndef beautiful(day,k):\r\n if ( int(day)- int(day[::-1]) ) % k == 0:\r\n return True\r\n else:\r\n return False\r\ncount=0\r\nfor days in range(int(i),int(j)+1):\r\n #print days, type(days)\r\n \r\n if beautiful(str(days),k)==True:\r\n count+=1\r\nprint count \r\n \r\n" }, { "alpha_fraction": 0.4554597735404968, "alphanum_fraction": 0.5014367699623108, "avg_line_length": 22.75, "blob_id": "a14116f7e8d47c39a610f0959cdcfba26fafe0f1", "content_id": "99413c8e8d0796995f5597f836e37b5fe6101f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/Day 1,Statistics Quartiles.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n= input()\r\nvals= map(int,raw_input().split())\r\n\r\ndef median(values):\r\n n=len(values)\r\n print \"values=\",values\r\n if n%2==1:\r\n return sorted(values)[(n+1)/2 - 1]\r\n else:\r\n return sum(sorted(values)[(n/2)-1:(n/2)+1])/2\r\n \r\ndef quartiles(values):\r\n n=len(values)\r\n values.sort()\r\n Q2=median(values)\r\n Q1=median(values[:n/2])\r\n print \"values=\",values\r\n if n%2==0:\r\n print \"values=\",values[n/2:]\r\n 
Q3=median(values[n/2:]) \r\n else:\r\n print \"values=\",values[n/2+1:]\r\n Q3=median(values[n/2+1:])\r\n \r\n return Q1,Q2,Q3\r\n \r\nQ1,Q2,Q3=quartiles(vals)\r\nprint str(Q1)+\"\\n\"+str(Q2)+\"\\n\"+str(Q3) " }, { "alpha_fraction": 0.5191347599029541, "alphanum_fraction": 0.5474209785461426, "avg_line_length": 18.821428298950195, "blob_id": "1723a78b852e1f242932a49af2738c8913cca5c7", "content_id": "44628a047b165d6a48fa37f313fd31ab60ba2e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/Day 4, Statistics Binomial Distribution 2.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\np,n=map(float,raw_input().split())\r\np/=100\r\nq=1-p\r\ndef factorial(n):\r\n if n==0 or n==1:\r\n return 1\r\n else:\r\n return n*factorial(n-1)\r\n \r\ndef nCr(n,r):\r\n return factorial(n)/(factorial(r) * factorial(n-r))\r\n\r\nfirstSum=0\r\nfor i in range(0,3):\r\n ncr=nCr(n,i)\r\n temp=(p**i) * (q** (n-i))\r\n firstSum+=ncr*temp\r\n \r\nprint round(firstSum,3)\r\n\r\nsecondSum=0\r\nfor i in range(2,11):\r\n ncr=nCr(n,i)\r\n temp=(p**i) * (q** (n-i))\r\n secondSum+=ncr*temp\r\n\r\nprint round(secondSum,3) \r\n\r\n \r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5836575627326965, "alphanum_fraction": 0.5836575627326965, "avg_line_length": 23.899999618530273, "blob_id": "8509266accef9914842cef2f73263f7059dae108", "content_id": "87e8d08033811a956cae27d32ba9b5e7584962a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 60, "num_lines": 10, "path": "/numpy Array Arithmetics.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\nN,M = map(int, raw_input().split())\r\nA = 
numpy.array([raw_input().split() for _ in range(N)],int)\r\nB = numpy.array([raw_input().split() for _ in range(N)],int)\r\nprint(A + B)\r\nprint(A - B)\r\nprint(A * B)\r\nprint(A / B)\r\nprint(A % B)\r\nprint A ** B" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 35.42856979370117, "blob_id": "8b57cb744b36d71265d890ddecb73d728dedbd1b", "content_id": "57e157c1c688719767f52b56ddfa4dded6a59614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/Sort a particular column using Lambda.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nN, M =map(int, raw_input().split())\r\nsample= [ map(int, raw_input().split()) for i in range(N)]\r\nk=input()\r\nsample.sort(key=lambda x: x[k])\r\nfor i in sample:\r\n print \" \".join(map(str,i))\r\n\r\n \r\n" }, { "alpha_fraction": 0.5634920597076416, "alphanum_fraction": 0.5899471044540405, "avg_line_length": 15.181818008422852, "blob_id": "a68514b0cced5dbb77ff9ec85827f0bf72940012", "content_id": "d3ff4c3f92b2a046541545b6bd45fd2c16d8141f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/Second Largest Num.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nN= input()\r\nL= raw_input().split()\r\nI=[]\r\nassert N>=2 and N<=10\r\n#print type(L[0])\r\nfor i in L:\r\n I.append(int(i))\r\nI.sort()\r\n#print L\r\nlargest=max(I)\r\n#print \"Large=\" + str(largest)\r\ncount=I.count(largest)\r\nfor i in range(count):\r\n I.remove(largest)\r\nprint max(I)\r\n\r\n\r\n \r\n\r\n5\r\n-7 -7 -7 -7 -6\r\n" }, { "alpha_fraction": 0.5861027240753174, "alphanum_fraction": 0.616314172744751, "avg_line_length": 23.461538314819336, "blob_id": "4ec693d8c853b04f8ce6dce851eb609cf15f49f2", "content_id": "50d1a072a52139ea49f5be34639214bf92a16a88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/itertools problem.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom itertools import *\r\nK,M= map(int, raw_input().split())\r\nList=[]\r\nassert K>=1 and K<=7\r\nassert M>=1 and M <=1000\r\n\r\nfor i in range(K):\r\n List.append(map(int,raw_input().split()[1:]))\r\nSum=0\r\nfor i in product(*List):\r\n Sum += (i**2) \r\nprint Sum % M\r\n" }, { "alpha_fraction": 0.4107883870601654, "alphanum_fraction": 0.4315352737903595, "avg_line_length": 21.5, "blob_id": "9061085fd1d395128053fe1a4539ade674b28910", "content_id": "64515abdaed16235eb3545a5421f3bae9df6fcb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/Algorithms Implementation Find the Digit.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "t = int(raw_input().strip())\r\nfor a0 in xrange(t):\r\n n = raw_input().strip()\r\n \r\n count=0\r\n for digit in n:\r\n if int(digit)!=0:\r\n if (int(n) % int(digit)) == 0:\r\n count+=1\r\n 
print count " }, { "alpha_fraction": 0.48124998807907104, "alphanum_fraction": 0.5062500238418579, "avg_line_length": 20.148147583007812, "blob_id": "d6a46206562c2d2cba01dc44ab49c780b756b6ae", "content_id": "8978993b19d152a500b7c8c292a6b968eea46341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 49, "num_lines": 27, "path": "/Day 0, Statistics.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#day 0, Statistics (Mean, Median, Mode)\r\nfrom collections import *\r\nn= input()\r\nvals= map(int,raw_input().split())\r\ntotal=0\r\nmodeVals=Counter(vals)\r\nmaxVal=max(modeVals.values())\r\nmodes=[]\r\nfor i in modeVals.keys():\r\n if modeVals[i]==maxVal:\r\n modes.append(i)\r\n \r\nmean=round(sum(vals)/float(n),1)\r\n\r\nif n%2==1:\r\n median=sorted(vals)[(n+1)/2 - 1]\r\n \r\nelse:\r\n median=sum(sorted(vals)[(n/2)-1:(n/2)+1])/2.0\r\n \r\nif len(modes)==1: \r\n mode= modes[0]\r\n \r\nelse:\r\n mode=min(modes)\r\n \r\nprint str(mean)+'\\n'+str(median)+'\\n'+str(mode) " }, { "alpha_fraction": 0.6628609895706177, "alphanum_fraction": 0.6762928366661072, "avg_line_length": 40.599998474121094, "blob_id": "50fb84f458477f694fb3064a11245b294c431445", "content_id": "f06815e2cfed0d33679567b40c7bec9bf37f8db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 97, "num_lines": 35, "path": "/AI Statistics and Machine Learning Correlation and Regression.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#physicsScores=map(int,raw_input().split())\r\n#historyScores=map(int,raw_input().split())\r\n\r\ndef KarlPearsonsCoefficient(physicsScores,historyScores):\r\n xValues=physicsScores\r\n yValues=historyScores\r\n xSquareValues=[i**2 for i in xValues]\r\n ySquareValues=[i**2 for i 
in yValues]\r\n n=len(xValues)\r\n xyValues=[xValues[i]*yValues[i] for i in range(n)]\r\n numerator=(n*sum(xyValues)- sum(xValues)*sum(yValues))\r\n denominator=(n*sum(xSquareValues)-(sum(xValues)**2))*(n*sum(ySquareValues)-(sum(yValues)**2))\r\n return round(numerator/denominator**0.5,3)\r\n \r\ndef SlopeOfRegressionLine(independent,dependent):\r\n y=dependent\r\n x=independent \r\n xMean=float(sum(x)/len(x))\r\n yMean=float(sum(y)/len(y))\r\n n=len(x)\r\n xStandardDeviation=((sum([(x[i]-xMean)**2 for i in range(n)])/(n-1))**0.5)\r\n yStandardDeviation=((sum([(y[i]-yMean)**2 for i in range(n)])/(n-1))**0.5)\r\n r=KarlPearsonsCoefficient(x,y)\r\n return round(r*(yStandardDeviation/float(xStandardDeviation)),3)\r\n \r\ndef InterceptOfRegressionLine(physicsScores,historyScores):\r\n xValues=physicsScores\r\n yValues=historyScores\r\n xSquareValues=[i**2 for i in xValues]\r\n #ySquareValues=[i**2 for i in yValues]\r\n n=len(xValues)\r\n xyValues=[xValues[i]*yValues[i] for i in range(n)] \r\n numerator= float((sum(yValues)*sum(xSquareValues))- (sum(xValues)*sum(xyValues)))\r\n denominator=(n*sum(xSquareValues)-(sum(xValues))**2)\r\n return round(numerator/denominator,3)" }, { "alpha_fraction": 0.5682137608528137, "alphanum_fraction": 0.5907173156738281, "avg_line_length": 26.30769157409668, "blob_id": "8e3fc8717ba51948effe27b706a71eb355473364", "content_id": "ecd88e3a601d384f11406cb4e5ae6c34f1dfaff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/Strong Password", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nimport sys\n\ndef minimumNumber(n, password):\n numbers = \"0123456789\"\n lower_case = \"abcdefghijklmnopqrstuvwxyz\"\n upper_case = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n special_characters = \"!@#$%^&*()-+\"\n\n count = 0 \n if any(i.isdigit() for i in password) == 
False:\n count += 1\n if any(i.islower() for i in password) == False:\n count += 1\n if any(i.isupper() for i in password) == False:\n count += 1\n if any(i in special_characters for i in password) == False:\n count+=1\n return max(count,6-n)\n\nif __name__ == \"__main__\":\n n = int(raw_input().strip())\n password = raw_input().strip()\n answer = minimumNumber(n, password)\n print answer\n\n" }, { "alpha_fraction": 0.6616766452789307, "alphanum_fraction": 0.6796407103538513, "avg_line_length": 28.363636016845703, "blob_id": "2c171beb4e469432934658e9d3435299dfea9b33", "content_id": "38fc88b1c8b551945f003f9af4cd55ff330fadb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/Day 4, Statistics Geometric Distribution 2.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Day 4, Geometric Distribution during few inspections\r\n#Enter your code here. Read input from STDIN. Print output to STDOUT\r\nnumerator,denominator=map(float,raw_input().split())\r\nn=input()\r\np=numerator/denominator\r\nq=1-p\r\ngd=0\r\nfor i in range(1,n+1):\r\n gd+= (q**(n-i))*p #geometric distribution\r\n \r\nprint round(gd,3)\r\n" }, { "alpha_fraction": 0.5435459017753601, "alphanum_fraction": 0.5451011061668396, "avg_line_length": 26.295454025268555, "blob_id": "7e29b0417b0c4c8ae3ef73c1becd84e75327d1a5", "content_id": "e771dc0d471aab77bfd92c468c2b2fae93d18ca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "no_license", "max_line_length": 110, "num_lines": 44, "path": "/Classes Torsion Angle.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nimport numpy\r\nimport math\r\n\r\nclass CartesianCoordinates(object):\r\n def __init__(self,A,B,C,D):\r\n self.A=A\r\n self.B=B\r\n self.C=C\r\n self.D=D\r\n self.Angle= None\r\n \r\n def vectorDiff(self, X,Y):\r\n return numpy.subtract(X,Y)\r\n \r\n def crossProduct(self, X,Y):\r\n return numpy.cross(X,Y)\r\n \r\n def dotProduct(self,X,Y):\r\n return numpy.dot(X,Y)\r\n \r\n def modulus(self, X):\r\n return math.sqrt(sum([i**2 for i in X]))\r\n \r\n def TorsionAngle(self,X,Y):\r\n self.Angle= numpy.degrees(numpy.arccos( self.dotProduct(X,Y) / (self.modulus(X) * self.modulus(Y)) ) )\r\n print round(self.Angle,2)\r\n \r\n def Program(self):\r\n \r\n AB=self.vectorDiff(self.A,self.B)\r\n BC=self.vectorDiff(self.B, self.C)\r\n CD=self.vectorDiff(self.C, self.D)\r\n X= self.crossProduct(AB,BC)\r\n Y= self.crossProduct(BC,CD)\r\n self.TorsionAngle(X,Y)\r\n \r\n\r\nA= map(float, raw_input().split())\r\nB= map(float, raw_input().split())\r\nC= map(float, raw_input().split())\r\nD= map(float, raw_input().split()) \r\nCC=CartesianCoordinates(A,B,C,D)\r\nCC.Program()\r\n\r\n \r\n \r\n \r\n " }, { "alpha_fraction": 0.5447761416435242, "alphanum_fraction": 0.5522388219833374, "avg_line_length": 26.44444465637207, "blob_id": "ec57b488c7cbb79fe45bc097f21b9c8ea6528431", "content_id": "67094b21c3112eb824ad7a69bb02d5f10860af86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/Algorithms Implementation Cut the Sticks.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n = int(raw_input().strip())\r\narr = map(int,raw_input().strip().split(' '))\r\nlength=min(arr)\r\nwhile len(arr)!=0:\r\n print len(arr)\r\n length=min(arr)\r\n #print \"length=\"+ str(length)\r\n arr=[i-length for i in arr if (i-length)!=0]\r\n #print arr\r\n \r\n \r\n" }, { "alpha_fraction": 
0.5570934414863586, "alphanum_fraction": 0.5640138387680054, "avg_line_length": 17.0625, "blob_id": "d3f43a20f9fa5cf7d295020ff62c7e565b63a31f", "content_id": "90b582a4f9f9e5d5ddfa0288c5caf53a5d050be2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 45, "num_lines": 16, "path": "/camelCase", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nimport sys\n\ndef camelcase(s):\n # Complete this function\n count = 1\n for i in s:\n if i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n count += 1\n return count\n\nif __name__ == \"__main__\":\n s = raw_input().strip()\n result = camelcase(s)\n print result\n" }, { "alpha_fraction": 0.5167173147201538, "alphanum_fraction": 0.5167173147201538, "avg_line_length": 20.35714340209961, "blob_id": "a6f9f9d8cb09fe27e507e8b7e9b0a52a3596cd8a", "content_id": "d807d61ae18583951a2083e7935dc1fb225e6418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/Algorithms Diagnol Difference.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n = int(raw_input().strip())\r\na = []\r\nfor a_i in xrange(n):\r\n a_temp = map(int,raw_input().strip().split(' '))\r\n a.append(a_temp)\r\n\r\npDiagnol= [a[i][i] for i in range(n)]\r\n\r\nfor i in range(n):\r\n a[i].reverse()\r\n \r\nsDiagnol= [a[i][i] for i in range(n)] \r\n\r\nprint abs( sum(pDiagnol)- sum(sDiagnol))\r\n \r\n \r\n" }, { "alpha_fraction": 0.6514851450920105, "alphanum_fraction": 0.6603960394859314, "avg_line_length": 26.29729652404785, "blob_id": "1d8a563cd09df771dc495fec132398673d60d262", "content_id": "503dee677d0e1ad0eb85e29e01a47f3c433d25f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
1010, "license_type": "no_license", "max_line_length": 75, "num_lines": 37, "path": "/AI Statistics and Machine Learning: The best aptitude test.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\ndef mean(x):\n return round(sum(x)/float(n),1)\n\ndef standardDeviation(values,mean):\n data=[(val-mean)**2 for val in values]\n return (sum(data)/float(len(data)))**0.5\n\n\ndef pearsonCorrelationCoefficient(x,y):\n xMean=mean(x)\n yMean=mean(y)\n xStd=standardDeviation(x,xMean)\n yStd=standardDeviation(y,yMean)\n numerator = sum( (x[i]-xMean)*(y[i]-yMean) for i in range(n))\n denominator = n*xStd*yStd\n if denominator == 0:\n return 0\n else:\n return round((numerator/denominator),3)\n\nt = input()\nfor testcase in range(t):\n n = input()\n gpa = map(float, raw_input().split())\n testScores = []\n\n for i in range(5):\n testScores.append(map(float, raw_input().split()))\n\n correlationCoefficient = []\n\n for i in testScores:\n correlationCoefficient.append(pearsonCorrelationCoefficient(i,gpa))\n\n sol = correlationCoefficient.index(max(correlationCoefficient))+1\n print sol\n" }, { "alpha_fraction": 0.513383686542511, "alphanum_fraction": 0.5229924321174622, "avg_line_length": 27.5, "blob_id": "0a98344402aad210dfb333660747280a52684694", "content_id": "594a3376b27b4dc57e0bd174611cd76823735e6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 106, "num_lines": 48, "path": "/Classes Torsion Angle without Numpy.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\n\r\nimport math\r\n\r\nclass CartesianCoordinates(object):\r\n def __init__(self,A,B,C,D):\r\n self.A=A\r\n self.B=B\r\n self.C=C\r\n self.D=D\r\n self.Angle= None\r\n \r\n def vectorDiff(self, X,Y):\r\n return [Y[i]- X[i] for i in range(len(X))]\r\n \r\n def crossProduct(self, X,Y):\r\n vector=[]\r\n vector.append( X[1] * Y[2]- X[2]* Y[1])\r\n vector.append( X[2] * Y[0]- X[0]* Y[2])\r\n vector.append( X[0] * Y[1]- X[1]* Y[0])\r\n return vector\r\n\r\n \r\n def dotProduct(self,X,Y):\r\n return sum([X[i] * Y[i] for i in range(len(X))])\r\n \r\n def modulus(self, X):\r\n return math.sqrt(sum([i**2 for i in X]))\r\n \r\n def TorsionAngle(self,X,Y):\r\n self.Angle= math.degrees(math.acos( self.dotProduct(X,Y) / (self.modulus(X) * self.modulus(Y)) ) )\r\n \r\n def Program(self):\r\n \r\n AB=self.vectorDiff(self.A,self.B)\r\n BC=self.vectorDiff(self.B, self.C)\r\n CD=self.vectorDiff(self.C, self.D)\r\n X= self.crossProduct(AB,BC)\r\n Y= self.crossProduct(BC,CD)\r\n self.TorsionAngle(X,Y)\r\n print round(self.Angle,2)\r\n\r\nA= map(float, raw_input().split())\r\nB= map(float, raw_input().split())\r\nC= map(float, raw_input().split())\r\nD= map(float, raw_input().split()) \r\nCC=CartesianCoordinates(A,B,C,D)\r\nCC.Program()\r\n\r\n \r\n \r\n \r\n " }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.60317462682724, "avg_line_length": 18.77777862548828, "blob_id": "7764ba06fbf386258ce6b5bdacabc946e26cd5a1", "content_id": "c9e4b4afeaee0e6cf198dabc7b39e5b722fe96eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/Day 0, Statistics(Weighted Mean).py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Day 0: Weighted Mean\r\nn=input()\r\nX=map(int,raw_input().split())\r\nW=map(int,raw_input().split())\r\ntotal=0\r\nfor i in range(n):\r\n 
total+=X[i]*W[i]\r\n\r\nprint round(total/float(sum(W)),1) " }, { "alpha_fraction": 0.5633333325386047, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 26.100000381469727, "blob_id": "0e31ae7737f8847cb351704f529fceffdef39bb9", "content_id": "dee5a741447ed32572080001e18bf8b30500ce79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 73, "num_lines": 10, "path": "/Regex Email address authenticity.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport re\r\n\r\nN= input()\r\nemails =[]\r\nfor i in range(N):\r\n emails.append(raw_input())\r\n\r\npattern= re.compile('(^[a-zA-Z0-9_-]+@[a-zA-Z0-9]+\\.[a-zA-Z]{0,3}$)') \r\nprint sorted(filter(pattern.match,emails)) \r\n\r\n \r\n\r\n\r\n " }, { "alpha_fraction": 0.5358851552009583, "alphanum_fraction": 0.5526315569877625, "avg_line_length": 30.30769157409668, "blob_id": "76a91a859b1b5b73ef219f6b5f7487c957b044e8", "content_id": "477e2a9642eb509631fca859907fb38925a0a945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/Regex and Parsing UID.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import re\r\n\r\nno_repeats = r\"(?!.*(.).*\\1)\"\r\ntwo_or_more_upper = r\"(?=(?:.*[A-Z]){2,})\"\r\nthree_or_more_digits = r\"(?=(?:.*\\d){3,})\"\r\nten_alphanumerics = r\"[a-zA-Z0-9]{10}\"\r\nfilters = no_repeats, two_or_more_upper, three_or_more_digits, ten_alphanumerics\r\n\r\nfor uid in [raw_input() for _ in range(input())]:\r\n if all([re.match(f, uid) for f in filters]):\r\n print (\"Valid\")\r\n else:\r\n print (\"Invalid\")" }, { "alpha_fraction": 0.6430062651634216, "alphanum_fraction": 0.6471816301345825, 
"avg_line_length": 20.772727966308594, "blob_id": "a641b788af28c653df01111c1c50ba7ea7847e83", "content_id": "37ba04aec7731cb2c6cd5a4314a2fb0a10ee9ba9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/Day 9: Multiple Linear Regression.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom sklearn.linear_model import LinearRegression\nm,n = map(int,raw_input().split())\nx,y = [],[]\nfor i in range(n):\n vals=map(float,raw_input().split())\n x.append(vals[:-1])\n y.append(vals[-1])\n\nq = int(raw_input())\ntest = []\nfor i in range(q):\n vals=map(float,raw_input().split())\n test.append(vals)\n\n\nreg = LinearRegression()\nreg.fit(x,y)\n\npred = reg.predict(test)\nfor i in pred:\n print i\n" }, { "alpha_fraction": 0.44545453786849976, "alphanum_fraction": 0.46060606837272644, "avg_line_length": 23.230770111083984, "blob_id": "0c8d71aec4204dababe0425bee61a74eb3943e60", "content_id": "9c57a5b52cdb9d6108bbebcc77884474f6e7af9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 43, "num_lines": 13, "path": "/Algorithms Implementation Divisible Sum Pairs.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n,k = raw_input().strip().split(' ')\r\nn,k = [int(n),int(k)]\r\na = map(int,raw_input().strip().split(' '))\r\npairs=[]\r\ncount=0\r\nfor i in range(len(a)-1):\r\n for j in range(1,len(a)):\r\n if i<j and ((a[i]+a[j]) % k ==0):\r\n pairs.append([a[i],a[j]])\r\n count+=1\r\n\r\n#print pairs \r\nprint count " }, { "alpha_fraction": 0.6508875489234924, "alphanum_fraction": 0.665680468082428, "avg_line_length": 28.545454025268555, "blob_id": 
"e614334a304f9cf5a7ff2cbe5b0973056732d072", "content_id": "245964c40f95e913bfef418d6eb78a9d4cb652c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/Day 1,Statistics Standard Deviation.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nn=input()\r\nx=map(int,raw_input().split())\r\nmean=round(sum(x)/float(n),1)\r\n\r\ndef standardDeviation(values,mean):\r\n data=[(val-mean)**2 for val in values]\r\n return (sum(data)/float(len(data)))**0.5\r\n \r\nsd=standardDeviation(x,mean)\r\nprint round(float(sd),1) " }, { "alpha_fraction": 0.7412280440330505, "alphanum_fraction": 0.7412280440330505, "avg_line_length": 44, "blob_id": "f8d3d32862ae9057ae06e0dbb0290ef290ce0c07", "content_id": "45c95a62c362a9aea1179095fe54e37433ff825e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 69, "num_lines": 5, "path": "/itertools Combinations with Replacement.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom itertools import combinations_with_replacement\r\ns,k=raw_input().split()\r\nfor i in combinations_with_replacement(sorted(s),int(k)):\r\n print \"\".join(i)" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 25, "blob_id": "5738906bae4fde7bb70445cdea1cc4c57906a8f6", "content_id": "286259e378074031487a9dcab393363bd4e0c8cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/Math Mod Power.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\na=int (raw_input())\r\nb=int (raw_input())\r\nm=int (raw_input())\r\nprint a**b\r\nprint (a**b)%m " }, { "alpha_fraction": 0.4893617033958435, "alphanum_fraction": 0.5265957713127136, "avg_line_length": 17, "blob_id": "b4fd4322c54520c6f9f5c25c4d1ac4cc9a7bad28", "content_id": "2eb94ffccca41023429dd02411df37fcdee6f6d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/Math Find Angle MBC.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport math\r\n\r\nab = input()\r\nbc = input()\r\n\r\nh = math.sqrt(ab**2 + bc**2)\r\nh = h / 2.0\r\nadj = bc / 2.0\r\nprint str(int(round(math.degrees(math.acos(adj/h))))) + \"ยฐ\"" }, { "alpha_fraction": 0.5577557682991028, "alphanum_fraction": 0.5643564462661743, "avg_line_length": 19.200000762939453, "blob_id": "384509eaa3cd4cc16c1a924ac6f41b9b378e9793", "content_id": "96ef31eec70df454223b63b4aa90b39228c9f026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", 
"max_line_length": 43, "num_lines": 15, "path": "/lonelyInteger", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nimport sys\n\ndef lonely_integer(a):\n lonelyInteger = a[0]\n for i in range(len(a)):\n if a.count(a[i]) == 1:\n lonelyInteger = a[i]\n return lonelyInteger\n \n \nn = int(raw_input().strip())\na = map(int,raw_input().strip().split(' '))\nprint lonely_integer(a)\n" }, { "alpha_fraction": 0.4053058326244354, "alphanum_fraction": 0.4274134039878845, "avg_line_length": 28.155555725097656, "blob_id": "a3a3f2770948843022076e38b7a83d64e794dc7a", "content_id": "1638db2693d5c1412c434332d3fd966730e13eab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1357, "license_type": "no_license", "max_line_length": 69, "num_lines": 45, "path": "/DataStructures Stack.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nclass Stack(object):\r\n def __init__(self):\r\n self.stack=[]\r\n self.maximum=-999999\r\n \r\n def push(self,element):\r\n self.stack.append(element)\r\n \r\n \r\n def pop(self):\r\n if len(self.stack)!=0:\r\n if self.stack[0]==self.maximum:\r\n self.maximum=max()\r\n \r\n self.stack.remove(self.stack[0]) \r\n \r\n def maxElement(self):\r\n if len(self.stack)!=0:\r\n return max(self.stack) \r\n \r\n def problem(self,N):\r\n for n in range(N):\r\n op= raw_input().split()\r\n if op[0]=='1':\r\n if op[1]> self.maximum:\r\n self.maximum=op[1] \r\n self.stack.insert(0,op[1])\r\n\r\n if op[0]=='2':\r\n if len(self.stack)!=0:\r\n top=self.stack[0]\r\n self.stack.remove(top)\r\n if self.maximum==top and len(self.stack)!=0:\r\n self.maximum=max(self.stack)\r\n if self.maximum==top and len(self.stack)==0:\r\n self.maximum=-999999\r\n \r\n\r\n if op[0]=='3':\r\n print self.maximum\r\n\r\nS=Stack() \r\nN=input()\r\nS.problem(N)\r\n" }, { "alpha_fraction": 0.4433962404727936, "alphanum_fraction": 0.4591194987297058, "avg_line_length": 20.85714340209961, "blob_id": "5a2b743689d8a6c3f84b0a375b37911883222012", "content_id": "49ca0eb7f6ce34c4e8767e4bbd0362830c9f141b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/Algorithms Right Circular Rotation.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n,k,q = raw_input().strip().split(' ')\r\nn,k,q = [int(n),int(k),int(q)]\r\na = map(int,raw_input().strip().split(' '))\r\n#print a \r\nif k%n!=0:\r\n k=k%n\r\n for i in range(k):\r\n last=a[-1]\r\n a=a[:-1]\r\n a.insert(0,last)\r\n \r\nfor a0 in xrange(q):\r\n m = int(raw_input().strip())\r\n print a[m]" }, { "alpha_fraction": 0.6494252681732178, "alphanum_fraction": 0.6494252681732178, "avg_line_length": 32, "blob_id": "deb9e56d5127883ab4abceec30e4f6c43a07ae66", 
"content_id": "6564a2db66aa0ebf9c39cb90c9051e3e2ad7d0c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 67, "num_lines": 5, "path": "/numpy transpose & flatten.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\nN, M = map(int, raw_input().split())\r\narr=numpy.array( [map(int, raw_input().split()) for i in range(N)])\r\nprint numpy.transpose(arr)\r\nprint arr.flatten()\r\n " }, { "alpha_fraction": 0.802431583404541, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 64.80000305175781, "blob_id": "c5b2d63f45fe3ad682a7528172556f08771f7c9d", "content_id": "c5a9839047f3760d5ccc4baf4b0fdfd05b6de0cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 329, "license_type": "no_license", "max_line_length": 90, "num_lines": 5, "path": "/README.md", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# HackerRank-Python-Solutions\nThis repository is created to upload most of the solutions to the problems in HackerRank. \nIt'll be helpful for the beginners in python(Considering me when I started python).\nI used python 2. 
Changes can be made for python 3.\nIntersted people can make contributions by commiting to this repository.\n" }, { "alpha_fraction": 0.5806451439857483, "alphanum_fraction": 0.5846773982048035, "avg_line_length": 44.599998474121094, "blob_id": "ce7a7d4cf0ef2c62acb48dc950cde16ca94e8532", "content_id": "0cf76541a3742fe38e920ea13ab1088e15455b08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 68, "num_lines": 5, "path": "/numpy Arrays Concatenation.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\nN,M, P= map(int, raw_input().split())\r\nA= numpy.array([ map(int, raw_input().split()) for i in range(N)] )\r\nB= numpy.array([ map(int, raw_input().split()) for i in range(M)] )\r\nprint numpy.concatenate((A,B),axis =0)\r\n \r\n" }, { "alpha_fraction": 0.4483775794506073, "alphanum_fraction": 0.5221238732337952, "avg_line_length": 26.08333396911621, "blob_id": "8d9e46d1516955ce7f01da65d8596f859935b756", "content_id": "a16507d78816742915977b3bec2698c2714b7f7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/Algorithms Time Conversion.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "time = raw_input().strip()\r\n\r\nrailTime=\"\"\r\nif time[-2:]==\"PM\" and time[:2]!='12':\r\n railTime=str(int(time[:2])+12)\r\nif time[-2:]==\"PM\" and time[:2]=='12': \r\n railTime+=time[:2]\r\nif time[-2:]==\"AM\" and time[:2]==\"12\":\r\n railTime=\"00\"\r\nif time[-2:]==\"AM\" and time[:2]!=\"12\": \r\n railTime+=time[:2]\r\nprint railTime+time[2:-2] " }, { "alpha_fraction": 0.4637404680252075, "alphanum_fraction": 0.4866412281990051, "avg_line_length": 25.6842098236084, "blob_id": "0b4f7a1747c3eb26238a2ffffc3cb8fb25f83170", 
"content_id": "c79f5ae4ff74954431a7d13c98a8884a5fddf8b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/DataStructures Stack Solution.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import sys\r\n\r\nstack = []\r\nmax_items = []\r\n\r\nN = int(sys.stdin.readline())\r\nfor i in xrange(0, N):\r\n query = sys.stdin.readline().split()\r\n if query[0] == \"1\":\r\n x = int(query[1])\r\n stack.append(x)\r\n if len(max_items) == 0 or x >= max_items[len(max_items) - 1]:\r\n max_items.append(x)\r\n elif query[0] == \"2\":\r\n x = stack.pop()\r\n if max_items[len(max_items) - 1] == x:\r\n max_items.pop()\r\n else: # query[0] == \"3\"\r\n print(max_items[len(max_items) - 1])" }, { "alpha_fraction": 0.65234375, "alphanum_fraction": 0.65234375, "avg_line_length": 30, "blob_id": "d022dd6ff982d68f2e89cd4a6943d69d549fa313", "content_id": "c3c506b7372e49dbfc62c50cd9d8bd5427af6b60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/itertools Groupby.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom itertools import *\r\ns=raw_input()\r\nchar= [int(i) for i,j in groupby(s)]\r\ncount= [len(list(j)) for i,j in groupby(s)]\r\n\r\nfor i in range(len(char)):\r\n print (count[i], char[i]),\r\n" }, { "alpha_fraction": 0.5972696542739868, "alphanum_fraction": 0.6279863715171814, "avg_line_length": 34.375, "blob_id": "66d08cb360b99e6be06ef92a242a17449aa5484d", "content_id": "05511fbff23b6cab693c1b07288755cf40420999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 84, "num_lines": 8, "path": "/Day 6, Statistics Central Limit Theorem 2.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import math\r\ntickets,n,mean,sd=input(),input(),input(),input()\r\ndef centralLimitTheorem(tickets,n,mean,sd):\r\n newMean=n*mean\r\n newSd=(n**0.5)*sd \r\n return round(0.5 * (1 + math.erf((tickets - newMean) / (newSd * (2 ** 0.5)))),4)\r\n\r\nprint centralLimitTheorem(tickets,n,mean,sd)\r\n\r\n" }, { "alpha_fraction": 0.5368663668632507, "alphanum_fraction": 0.5437787771224976, "avg_line_length": 16.772727966308594, "blob_id": "b43ecaeced0d1b9454e722d144688d92be8c44be", "content_id": "a033185b345ae89e4945d2c1e691194de8d40ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 33, "num_lines": 22, "path": "/Algorithms Implementation Encryption.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import math\r\ns=raw_input()\r\ncode=s[:]\r\nl=len(s)\r\ncolumns=int(math.ceil(l**(0.5)))\r\nrows=int(columns)-1\r\nif rows*columns<l:\r\n rows=columns\r\n \r\ngrid=[]\r\nfor i in range(rows):\r\n grid.append(code[:columns])\r\n code=code[columns:]\r\n\r\nencrypted=''\r\nfor i in range(columns):\r\n for j in range(rows):\r\n if i<len(grid[j]):\r\n 
encrypted+=grid[j][i]\r\n encrypted+=\" \" \r\n\r\nprint encrypted \r\n \r\n" }, { "alpha_fraction": 0.5914893746376038, "alphanum_fraction": 0.5914893746376038, "avg_line_length": 21.299999237060547, "blob_id": "fcd06467a93bfd7041ea4a50fa23b8a1dd923488", "content_id": "aac3baa9245119bf1d569cccda60e19972cc1ea7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Regex.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport re\r\nN= input()\r\nfor i in range(N):\r\n ans=True\r\n try:\r\n reg= re.compile(raw_input())\r\n except re.error:\r\n ans= False\r\n print ans " }, { "alpha_fraction": 0.6412556171417236, "alphanum_fraction": 0.6412556171417236, "avg_line_length": 25.625, "blob_id": "d2339bd81155fc1cce913620133afd02d9dd01cb", "content_id": "081c12cd3b0ea52b9d41bab3db546d9d068a2464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/Algorithms Implementation Max-Mini.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "a,b,c,d,e = raw_input().strip().split(' ')\r\na,b,c,d,e = [int(a),int(b),int(c),int(d),int(e)]\r\nnumbers=[a,b,c,d,e]\r\ntotal=sum(numbers)\r\nmaximum=total-min(numbers)\r\nminimum=total-max(numbers)\r\nprint maximum\r\nprint minimum\r\n\r\n" }, { "alpha_fraction": 0.6705336570739746, "alphanum_fraction": 0.6983758807182312, "avg_line_length": 34.08333206176758, "blob_id": "9f17ac7901cd234022d329e6e5a5fc8b15328035", "content_id": "c575d98b32062408672e4e7303653029e400a878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", 
"max_line_length": 72, "num_lines": 12, "path": "/Day 5, Statistics Normal Distribution.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Day 5, Statistics Normal Distribution\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport math\r\nmean,sd=map(float,raw_input().split())\r\nx=input()\r\ny1,y2=map(float,raw_input().split())\r\n\r\ndef normalDistribution(x,mean,sd):\r\n return round(0.5 * (1 + math.erf((x - mean) / (sd * (2 ** 0.5)))),3)\r\n\r\nprint normalDistribution(x,mean,sd)\r\nprint normalDistribution(y2,mean,sd)-normalDistribution(y1,mean,sd)" }, { "alpha_fraction": 0.6672828197479248, "alphanum_fraction": 0.6672828197479248, "avg_line_length": 39.769229888916016, "blob_id": "3e4e3360b24406cd9d6d5ce151455bf2da630ee4", "content_id": "534700a0825ba17230d63d9ceb130b3a5f171745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 78, "num_lines": 13, "path": "/Numpy Polynomials.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\npoly= map(float,raw_input().split())\r\nval=input()\r\nprint numpy.polyval(poly,val) # evaluates the polynomial at specified value\r\n\r\n#numpy.polyint(poly) --- indefinite integral\r\n#numpy.polyder(poly) --- derivative\r\n#numpy.poly(poly) --- roots to polynomial equation\r\n#numpy.roots(poly) --- find roots of polynomial\r\n#numpy.polyadd(A,B) --- add's two or more polynomial\r\n#numpy.polysub(A,B) --- subtracts two or more polynomial\r\n#numpy.polymul(A,B) --- multiply\r\n#numpy.polydiv(A,B) --- divides" }, { "alpha_fraction": 0.43283581733703613, "alphanum_fraction": 0.46268656849861145, "avg_line_length": 22.25, "blob_id": "421a2f1286d28f30f6637de92ef9fb4bf39692b9", "content_id": "47fefb396e9705679b03d5310fc3bd88a116365b", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/Algorithms Implementation Save the Prisoner.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "t=input()\r\nfor testCases in range(t):\r\n vals=map(int,raw_input().split())\r\n n,m,s=vals[0],vals[1],vals[2]\r\n if ((m+s)%n-1) == 0:\r\n print n\r\n else:\r\n print ((m+s)%n-1) \r\n " }, { "alpha_fraction": 0.46022728085517883, "alphanum_fraction": 0.47159090638160706, "avg_line_length": 23.66666603088379, "blob_id": "95c98b4ce4df50382760d983642606d1cd4a2026", "content_id": "adf34a3b7d496263cfdec38b2993aef204383ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/Reliance.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "def RelianceWipodKey():\r\n key=\"\"\r\n for i in range(10):\r\n key+=i\r\n for alpha in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\r\n key+=alpha\r\n \r\n" }, { "alpha_fraction": 0.38679245114326477, "alphanum_fraction": 0.42318058013916016, "avg_line_length": 22.758621215820312, "blob_id": "6599471ce6de9f246443404f5e7e31244d4caf3a", "content_id": "dfc29de49e9f56563cac065ba2828c548c0e7056", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/Collections Moderate Problem using deques.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom collections import *\r\nN = int(raw_input())\r\nassert N>=1 and N<=5\r\ncubes=[]\r\nfor i in range(N):\r\n n = int(raw_input())\r\n assert n>=1 and n<=10**5\r\n cubes.append((map(int, raw_input().split())))\r\n \r\nfor i in cubes:\r\n d= deque(i)\r\n ans= \"Yes\"\r\n vp=[1000000000000]\r\n while (ans==\"Yes\" and len(d)>1):\r\n if d[0] <= d[-1]:\r\n k=d.pop()\r\n if k > vp[-1]:\r\n ans= \"No\"\r\n else:\r\n k=d.popleft()\r\n if k > vp[-1]:\r\n ans= \"No\"\r\n \r\n vp.append(k) \r\n if len(d)==1:\r\n if d[0]> vp[-1]:\r\n ans= \"No\"\r\n print ans \r\n \r\n \r\n" }, { "alpha_fraction": 0.7490494251251221, "alphanum_fraction": 0.7490494251251221, "avg_line_length": 27, "blob_id": "6d0a262b5ea2aebadfe93ddc2fe700a4e4b4c071", "content_id": "0e59b4cf2ae79eef9952d30ff15725725e42cf72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 79, "num_lines": 9, "path": "/sets.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "numOfEng= raw_input()\r\nrollOfEng=set(raw_input().split())\r\nnumOfFre= raw_input()\r\nrollOfFre=set(raw_input().split())\r\n\r\ntotal= numOfEng+numOfFre\r\n\r\nstudentsTakingPaper= list(rollOfEng.union(rollOfFre)) #converting set into list\r\nprint len(studentsTakingPaper)\r\n\r\n" }, { "alpha_fraction": 0.4795321524143219, "alphanum_fraction": 0.48148149251937866, "avg_line_length": 22.549999237060547, "blob_id": "ab06c1c9df5da2e6a47a7ca6b6a4b721763089c4", "content_id": "a41191986732c1a3509a52df4dab7f32ca383126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 63, "num_lines": 20, "path": "/Algorithms Implementation Taum and B'day.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "t = int(raw_input().strip())\r\ndef 
costOfBlackGifts(x,y,z):\r\n if y+z < x:\r\n return y+z\r\n else:\r\n return x\r\n \r\ndef costOfWhiteGifts(x,y,z):\r\n if x+z < y:\r\n return x+z\r\n else:\r\n return y\r\n \r\nfor a0 in xrange(t):\r\n b,w = raw_input().strip().split(' ')\r\n b,w = [long(b),long(w)]\r\n x,y,z = raw_input().strip().split(' ')\r\n x,y,z = [long(x),long(y),long(z)]\r\n cost=b*costOfBlackGifts(x,y,z) + w* costOfWhiteGifts(x,y,z)\r\n print cost\r\n \r\n \r\n\r\n" }, { "alpha_fraction": 0.6637930870056152, "alphanum_fraction": 0.6637930870056152, "avg_line_length": 27.25, "blob_id": "9f421decd65093a65cd1bf18968b114dc363119d", "content_id": "5278b0a41be8bb37a464eed790012e5485a9ecdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/Itertools.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom itertools import product\r\nA=map(int,raw_input().split())\r\nB=map(int,raw_input().split())\r\nans=\"\"\r\nfor i in product(A,B):\r\n ans+=str(i)+\" \"\r\nprint ans A" }, { "alpha_fraction": 0.6276595592498779, "alphanum_fraction": 0.6276595592498779, "avg_line_length": 29.33333396911621, "blob_id": "9c015b049ca8578094b09e7d7a6f65419a809c86", "content_id": "46976b1c442f140f4ea161f4995fda7ccbfb076b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/Regex and Parsing Split.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nimport re\r\nexpression=r\"[,|.]*\"\r\nfor i in re.split(expression,raw_input().strip()):\r\n if i!='':\r\n print i\r\n" }, { "alpha_fraction": 0.6035242080688477, "alphanum_fraction": 0.607929527759552, "avg_line_length": 22.77777862548828, "blob_id": "30f1107fdfd12196c3b7c7c0015f5205abcb2b28", "content_id": "437586bd99c6b48be51c5b50637344cf77d83d2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/DataStructures Arrays Left Rotation.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n,d = map(int,raw_input().split())\r\narray=map(int,raw_input().split())\r\nrArray=array[::-1]\r\nfor turns in range(d):\r\n p=rArray.pop()\r\n array.append(p)\r\n array.remove(p)\r\nfor elements in array:\r\n print elements, \r\n" }, { "alpha_fraction": 0.5079559087753296, "alphanum_fraction": 0.5177478790283203, "avg_line_length": 28.925926208496094, "blob_id": "63be4d798c541b482e07fa36a514c1a9b96c26eb", "content_id": "3aae5b70d7fb37af3221abd3098b58ceb2da3a9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 69, "num_lines": 27, "path": "/Day 7: Spearman's Rank Correlation Coefficient.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nn=input()\nx=map(float,raw_input().split())\ny=map(float,raw_input().split())\n \ndef spearmanRankCorrelationCoefficient(x,y):\n xSort=sorted(x)\n ySort=sorted(y)\n rankOfX={}\n rankOfY={}\n for i in xSort:\n rankOfX[i]= xSort.index(i)+1\n \n for i in ySort:\n rankOfY[i]= ySort.index(i)+1\n length= len(x) \n d=[]\n for i in range(length):\n temp=rankOfX[x[i]]- rankOfY[y[i]]\n d.append(temp**2)\n \n numerator = 6* sum(d)\n denominator = length * ((length ** 2 ) -1) \n \n return round(1-(numerator/float(denominator)),3)\n \nprint spearmanRankCorrelationCoefficient(x,y) \n" }, { "alpha_fraction": 0.6830015182495117, "alphanum_fraction": 0.6906585097312927, "avg_line_length": 27.39130401611328, "blob_id": "3f3c8c087ec53ed28c950e3591152a5249d5db12", "content_id": "b2664a45f1649227d00a1d4e06129dea83c587a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/Day 7: Pearson Correlation Coefficient I.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nn=input()\nx=map(float,raw_input().split())\ny=map(float,raw_input().split())\n\ndef mean(x):\n return round(sum(x)/float(n),1)\n\ndef standardDeviation(values,mean):\n data=[(val-mean)**2 for val in values]\n return (sum(data)/float(len(data)))**0.5\n\n\ndef pearsonCorrelationCoefficient(x,y):\n xMean=mean(x)\n yMean=mean(y)\n xStd=standardDeviation(x,xMean)\n yStd=standardDeviation(y,yMean)\n numerator = sum( (x[i]-xMean)*(y[i]-yMean) for i in range(n))\n denominator = n*xStd*yStd\n return round((numerator/denominator),3)\n\nprint pearsonCorrelationCoefficient(x,y)\n" }, { "alpha_fraction": 0.5563636422157288, "alphanum_fraction": 0.5629090666770935, "avg_line_length": 30.785715103149414, "blob_id": "9b8c40ae0f6f1fa390460278b55c565c3183d73c", "content_id": "22f35224b7f04eb4fc48ee770edd64f8640cb10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1375, "license_type": "no_license", "max_line_length": 71, "num_lines": 42, "path": "/Classes Complex Numbers.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import math\r\n\r\nclass ComplexNo(object):\r\n def __init__(self, real, imaginary):\r\n self.real = real\r\n self.imaginary = imaginary\r\n\r\n def __add__(self, no):\r\n real = self.real + no.real\r\n imaginary = self.imaginary + no.imaginary\r\n return ComplexNo(real, imaginary)\r\n\r\n def __sub__(self, no):\r\n real = self.real - no.real\r\n imaginary = self.imaginary - no.imaginary\r\n return ComplexNo(real, imaginary)\r\n\r\n def __mul__(self, no):\r\n real = self.real * no.real - self.imaginary * no.imaginary\r\n imaginary = self.real * no.imaginary + self.imaginary * no.real\r\n return ComplexNo(real, imaginary)\r\n\r\n def __div__(self, no):\r\n x = float(no.real ** 2 + no.imaginary ** 2)\r\n y = self * ComplexNo(no.real, -no.imaginary)\r\n real = y.real / x\r\n imaginary = y.imaginary / x\r\n return ComplexNo(real, 
imaginary)\r\n\r\n def mod(self):\r\n real = math.sqrt(self.real ** 2 + self.imaginary ** 2)\r\n return ComplexNo(real, 0)\r\n# can also use __repr__ in place of __str__\r\n def __str__(self):\r\n return \"{0:.2f}{1:+.2f}i\".format(self.real,self.imaginary)\r\n\r\nC = map(float, raw_input().split())\r\nD = map(float, raw_input().split())\r\nx = ComplexNo(*C)\r\ny = ComplexNo(*D)\r\nfinal = [x+y, x-y, x*y, x/y, x.mod(), y.mod()]\r\nprint '\\n'.join(map(str, final))" }, { "alpha_fraction": 0.7123287916183472, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 22.5, "blob_id": "4f7569ef7fa8c51f6218c37c015d827efe86d4af", "content_id": "e31913bf087a3234d0bbf795ad073c9d2ef92b82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/Algorithms Implementation Equalitiy in a array.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "from collections import *\r\nn=input()\r\narray=map(int,raw_input().split())\r\nvalues=Counter(array)\r\n\r\nprint str(len(array)-max(values.viewvalues())) " }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6442307829856873, "avg_line_length": 33, "blob_id": "74cc4fb00b26e0b8572824543ba75a9567ff4a04", "content_id": "f4ea300505f8dcb6ee572068b412de4f62e9725d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/Regex and Parsing Float.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nimport re\r\nn=input()\r\nfor number in range(n):\r\n expression=r'^[+|-]?[0-9]*\\.[0-9]+$'\r\n print bool(re.match(expression,raw_input()))" }, { "alpha_fraction": 0.5057471394538879, "alphanum_fraction": 0.540229856967926, "avg_line_length": 24, "blob_id": "6438dfa887d4f0f9cc6fd40c7c04d5d66db8b95e", "content_id": "f91e02e0fe71009465f1e4c1633e9821f7e854cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/DataStructures Arrays Hour Glass.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#!/bin/python\r\narr = []\r\nmaxVal=-9999\r\nfor arr_i in xrange(6):\r\n arr_temp = map(int,raw_input().split(' '))\r\n arr.append(arr_temp)\r\n\r\n#print arr\r\n\r\nfor i in range(0,4):\r\n for j in range(0,4):\r\n firstRow = arr[i][j]+arr[i][j+1]+arr[i][j+2]\r\n thirdRow= arr[i+2][j]+arr[i+2][j+1]+arr[i+2][j+2]\r\n secondRow= arr[i+1][j+1]\r\n total=firstRow+secondRow+thirdRow\r\n print \"total = \"+ str(total)\r\n maxVal=max(maxVal,total)\r\n print \"maxVal= \"+ str(maxVal)\r\n\r\nprint maxVal\r\n\r\n" }, { "alpha_fraction": 0.3671875, "alphanum_fraction": 0.484375, "avg_line_length": 19.22222137451172, "blob_id": "24fa61505db6ed806346dd733f2080c9066618b6", "content_id": "50160a21b6c0ca51d260b464ef6025c6bfb0df26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/Algorithms Implementation Library Fine.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "d1,m1,y1 = raw_input().strip().split(' ')\r\nd1,m1,y1 = [int(d1),int(m1),int(y1)]\r\nd2,m2,y2 = raw_input().strip().split(' ')\r\nd2,m2,y2 = [int(d2),int(m2),int(y2)]\r\nfine=0\r\nif y1>y2:\r\n fine+=10000\r\nelif y1<y2:\r\n fine=0 
\r\nelse:\r\n if m1>m2:\r\n fine+= 500*(m1-m2)\r\n elif m1<m2:\r\n fine=0\r\n else:\r\n if d1>d2:\r\n fine+=15*(d1-d2)\r\nprint fine " }, { "alpha_fraction": 0.6727828979492188, "alphanum_fraction": 0.6850152611732483, "avg_line_length": 25.41666603088379, "blob_id": "3563141f199e879b8becc0d0b49ef262476ee814", "content_id": "f7383e34720811683ef19e3d1c987f13ce1e040e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/Algorithms Implementation Bon Appetit.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nvals=map(int,raw_input().split())\r\nn,k=vals[0], vals[1]\r\ncost=map(int,raw_input().split())\r\ntotal=sum(cost)\r\naCharge=total/2\r\nbActual=(total-(cost[k])) / 2\r\nbCharged=input()\r\nif bCharged!=bActual:\r\n print bCharged-bActual\r\nelse:\r\n print \"Bon Appetit\"" }, { "alpha_fraction": 0.6632652878761292, "alphanum_fraction": 0.6632652878761292, "avg_line_length": 27, "blob_id": "e115ff6df0833b15f9e444a7f1a019f4f4b4d998", "content_id": "6acb8a0399d178e17057ba7790c59df5cb4035b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Collections Moderate Problem.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom collections import *\r\nN= input()\r\nd= OrderedDict()\r\nwords= [raw_input() for i in range(N)]\r\ncounter= Counter(words)\r\nprint len(counter)\r\nfor i in words:\r\n d[i]= counter[i]\r\nprint \" \".join(map(str,d.values()))\r\n " }, { "alpha_fraction": 0.42895442247390747, "alphanum_fraction": 0.44638070464134216, "avg_line_length": 21, "blob_id": "61a430b58f1e4ff43118acadf59580fd2c3352f5", "content_id": "83f5df737c3ec98405e4c437fc2428ec3815c099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 69, "num_lines": 32, "path": "/Stack.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n'''\r\nclass Stack(object):\r\n def __init__(self):\r\n self.stack=[]\r\n \r\n def push(self,element):\r\n self.stack.append(element)\r\n \r\n \r\n def pop(self):\r\n if len(self.stack)!=0:\r\n self.stack.remove(self.stack[0]) \r\n \r\n def maxElement(self):\r\n if len(self.stack)!=0:\r\n return max(self.stack) \r\n''' \r\n \r\nN=input()\r\ns=[]\r\nfor n in range(N):\r\n #S=Stack()\r\n #print S.stack\r\n op= raw_input().split()\r\n if op[0]=='1':\r\n print op[1]\r\n s.insert(0,op[1])\r\n if op[0]=='2':\r\n s.pop(s[0])\r\n if op[0]=='3':\r\n print max(s)\r\n \r\n" }, { "alpha_fraction": 0.5390625, "alphanum_fraction": 0.5390625, "avg_line_length": 32, "blob_id": "387edb556dd7e40b2d6320352ad2791313ba7864", "content_id": "f536d3339aea7435ad80f93eb6f2f331ed80cd9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 113, "num_lines": 15, "path": "/Day 2, Statistics Compound Event Probability.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Compound Event Probability\r\nimport 
random, itertools\r\nn=input(\"Number of Urns : \")\r\nX=['R','R','R','R','B','B','B']\r\nY=['R','R','R','R','R','B','B','B','B']\r\nZ=['R','R','R','R','B','B','B','B']\r\ncomboGen=[i for i in itertools.product(X,Y,Z)]\r\nXYZ=[(x,y,z) for x,y,z in comboGen if (x,y,z)==('R','R','B') or (x,y,z)==('R','B','R') or (x,y,z)==('B','R','R')]\r\n\r\ndef findProbability(event,total):\r\n return float(event)/total\r\n\r\nevent=len(XYZ)\r\ntotal=len(comboGen) \r\nprint findProbability(event,total) " }, { "alpha_fraction": 0.6993243098258972, "alphanum_fraction": 0.712837815284729, "avg_line_length": 35, "blob_id": "2d753a39973c3b7e04a42965481f09e1289468d7", "content_id": "b4038df5897c9ef2a93d082f230957df5e61b701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/Day 4, Statistics Geometric Distribution.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "#Day 4, Geometric Distribution at a particular position\r\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nnumerator,denominator=map(float,raw_input().split())\r\nn=input()\r\np=numerator/denominator\r\nq=1-p\r\ngd= (q**(n-1))*p #geometric distribution\r\nprint round(gd,3)\r\n" }, { "alpha_fraction": 0.6952381134033203, "alphanum_fraction": 0.6952381134033203, "avg_line_length": 43, "blob_id": "1b1ccded656d36df6cd38113c891e09194a5bbc8", "content_id": "b2f238cbe7695dbe61d54011bd3e455319c68c72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/numpy Linear algebra.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "import numpy\r\nN= input()\r\narr= numpy.array( [(map(float, raw_input().split())) for i in range(N)])\r\nprint numpy.linalg.det(arr) #computes the determinent value of a matrix\r\n\r\n#numpy.linalg.eig(array) --- computes the eigen values of the matrix\r\n#numpy.linalg.inv(array) --- computes the inverse of a matrix\r\n" }, { "alpha_fraction": 0.47187501192092896, "alphanum_fraction": 0.5062500238418579, "avg_line_length": 21.230770111083984, "blob_id": "f44c4310dcd9cbb86384e09c528e5651569cc313", "content_id": "637bf6d3f11d1ef70ef0c9cc0c40a436655b1e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/Algorithms Implementation Jump on the clouds game.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n = int(raw_input().strip())\r\nc = map(int,raw_input().strip().split(' '))\r\nposition=0\r\njumps=0\r\nwhile position!=len(c)-1:\r\n print position\r\n if (position+2) <= (len(c)-1) and c[position+2]!=1:\r\n jumps+=1\r\n position+=2\r\n else:\r\n jumps+=1\r\n position+=1\r\nprint jumps \r\n \r\n" }, { "alpha_fraction": 0.6702508926391602, "alphanum_fraction": 
0.6810035705566406, "avg_line_length": 26.100000381469727, "blob_id": "25a7972a31638ed3f490c016c1ecd57d41930519", "content_id": "182e409e014506ffe3cabcb56773a5fd47d666dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Regex and Parsing Findall() and Finditer().py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport re\r\nexpression=raw_input()\r\npattern=\"[^aeiouAEIOU]?([aeiouAEIOU]{2,})[^aeiouAEIOU]\"\r\nvalues=re.findall(pattern,expression)\r\nif len(values)>0:\r\n for i in values:\r\n print i\r\nelse:\r\n print -1" }, { "alpha_fraction": 0.48672565817832947, "alphanum_fraction": 0.5044247508049011, "avg_line_length": 16.83333396911621, "blob_id": "61d1a48ec822181c94f9275d5a957b8f021bf341", "content_id": "8260d1a721f77cb73319dcee7792299171cae7f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 28, "num_lines": 6, "path": "/Algorithms Staircase.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "n = int(raw_input().strip())\r\n\r\nfor i in range(1,n+1):\r\n spaces=n-i\r\n sym=i*\"#\"\r\n print spaces*\" \"+sym\r\n" }, { "alpha_fraction": 0.5252525210380554, "alphanum_fraction": 0.5535353422164917, "avg_line_length": 29.133333206176758, "blob_id": "9d9b967ee7b812d130f771dae6065e69d6cc5a06", "content_id": "f67620452f7f4e9abe50f389329d95fba6178df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 495, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/Algorithms Implementation Sherlock and Squares.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", 
"src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport math\r\nt=input()\r\nfor testCase in range(t):\r\n count=0\r\n numbers=map(int,raw_input().split())\r\n lowerLimit=numbers[0]**(0.5)\r\n upperLimit=numbers[1]**(0.5)\r\n num=math.ceil(lowerLimit)\r\n while ( num>=lowerLimit and num<=upperLimit):\r\n if num**2 >= numbers[0] and num**2 <=numbers[1]:\r\n count+=1\r\n # print num**2\r\n num+=1\r\n print count \r\n \r\n \r\n " }, { "alpha_fraction": 0.7290322780609131, "alphanum_fraction": 0.7290322780609131, "avg_line_length": 29, "blob_id": "4de2ea6aa6123846fa63429196d4fd46fbe62549", "content_id": "860cb3a4f6b4d5de67cfa7976ae9868448589fd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 69, "num_lines": 5, "path": "/Math Polar Coordinates.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nimport cmath\r\nz= raw_input()\r\nprint abs(complex(z))\r\nprint cmath.phase(complex(z))\r\n" }, { "alpha_fraction": 0.5654885768890381, "alphanum_fraction": 0.5738045573234558, "avg_line_length": 28.733333587646484, "blob_id": "aa9186d76948e53852a61fa06a549fb871142724", "content_id": "93f0a8b68e98e3502c6b7a62e06ec9d0a77eb24a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 90, "num_lines": 15, "path": "/Collections defaultdict.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\r\nfrom collections import *\r\nn, m = map(int, raw_input().split())\r\nA= [raw_input() for i in range(n)]\r\nB= [raw_input() for i in range(m)]\r\nd= defaultdict(list)\r\nindex=0\r\nfor i in A:\r\n d[i].append( A.index(i, index) + 1)\r\n index+=1\r\nfor i in B:\r\n if i not in A:\r\n print -1\r\n else:\r\n print \" \".join(map(str, d[i])) # print a list of elements with uniform space/gap\r\n \r\n \r\n" }, { "alpha_fraction": 0.6828193664550781, "alphanum_fraction": 0.691629946231842, "avg_line_length": 26.625, "blob_id": "7c78b3a786f2bcbbb4e2be7322f0307a748ff066", "content_id": "0fa4da9785ced7fe5f03e72798537c1732aac873", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/Itertools Permutations.py", "repo_name": "ARAV0411/HackerRank-Solutions-in-Python", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom itertools import permutations\r\nip=raw_input().split()\r\ns=ip[0]\r\nk=int(ip[1])\r\nans= list(permutations(s,k))\r\nfor i in sorted(ans):\r\n print \"\".join(i)" } ]
105
svkampen/square
https://github.com/svkampen/square
67df2a84894b34d354bf08a37e3ca0947eb61f64
82fdcdf392411dfdc6d2e3c78fd44d281c293d13
c30c7f5654ec46b8f42c5ad1d5a5e903956dea27
refs/heads/master
2016-09-06T05:08:06.009497
2015-09-13T19:22:14
2015-09-13T19:22:14
42,134,350
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7769784331321716, "alphanum_fraction": 0.7841726541519165, "avg_line_length": 33.75, "blob_id": "b184d9fbfce6dd238d6c614adb7b3a172bddb418", "content_id": "b76e7a6538d6211bb667959d7c4cc5bb9e200c81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 139, "license_type": "no_license", "max_line_length": 71, "num_lines": 4, "path": "/README.md", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "# square\ndoes weird image processing that really just looks like bad compression\n\n![lena being processed](https://i.imgur.com/rXUw2Xi.gif)\n" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.695652186870575, "avg_line_length": 22, "blob_id": "6ffe6dc00e95fb2d526eef2f8b683188cc1dfac5", "content_id": "5a16ffab31f966fb1940d04e957661feaafa4e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 46, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/Makefile", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "all:\n\tgcc -shared -fPIC -o square.so square.c\n" }, { "alpha_fraction": 0.49095606803894043, "alphanum_fraction": 0.49354004859924316, "avg_line_length": 23.125, "blob_id": "a2baecfdb000ab9bf156c4c5fc3decf6ee01f459", "content_id": "e2db7442d54c11ca634d0e463e8d9deffcc3e2d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/geometry.py", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "class Area():\n def __init__(self, x, y, dx, dy):\n self.x = x\n self.y = y\n self.dx = dx\n self.dy = dy\n\nclass Vector2():\n \"\"\" A vector with an x-coordinate and a y-coordinate. 
\"\"\"\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\ndef get_slice(data, area):\n return data[area.y:area.y+area.dy,\n area.x:area.x+area.dx]\n\n" }, { "alpha_fraction": 0.4214208722114563, "alphanum_fraction": 0.5365985035896301, "avg_line_length": 30.491525650024414, "blob_id": "35bee3942ed3144014187f34b1678e2ebcb534da", "content_id": "0954229eecf8c4c0172c8a0212c49fd26b88f772", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1859, "license_type": "no_license", "max_line_length": 99, "num_lines": 59, "path": "/color.py", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "import math\n\ndef rgb2xyz(arr):\n \"\"\"Convert RGB color representation to CIE XYZ\"\"\"\n arr = [((it + 0.055) / 1.055) ** 2.4 if it > 0.04045 else it / 12.92 for it in arr]\n arr = [it * 100 for it in arr]\n\n X = arr[0] * 0.4124 + arr[1] * 0.3576 + arr[2] * 0.1805\n Y = arr[0] * 0.2126 + arr[1] * 0.7152 + arr[2] * 0.0722\n Z = arr[0] * 0.0193 + arr[1] * 0.1192 + arr[2] * 0.9505\n return [X,Y,Z]\n\ndef xyz2lab(arr):\n \"\"\" Convert CIE XYZ to CIE L* a* b* \"\"\"\n ref_x = 95.047\n ref_y = 100.000\n ref_z = 108.883\n\n arr[0] /= ref_x\n arr[1] /= ref_y\n arr[2] /= ref_z\n\n for n, item in enumerate(arr):\n if ( item > 0.008856 ):\n arr[n] **= (1/3)\n else:\n arr[n] = (7.787 * arr[n]) + (16 / 116)\n\n CIE_L = (116 * arr[1]) - 16\n CIE_a = 500 * (arr[0] - arr[1])\n CIE_b = 200 * (arr[1] - arr[2])\n\n return (CIE_L, CIE_a, CIE_b)\n\ndef rgb2lab(arr):\n \"\"\" RGB -> LAB \"\"\"\n return xyz2lab(rgb2xyz(arr))\n\ndef cie76_deltaE(lab1, lab2):\n \"\"\" Delta-Empfindung calculation according to the Commission internationale de l'รฉclairage, '76\n basically just the euclidean distance \"\"\"\n return math.sqrt((lab2[0] - lab1[0])**2 + (lab2[1] - lab1[1])**2 + (lab2[2] - lab1[2])**2)\n\ndef cie94_deltaE(lab1, lab2):\n \"\"\" Delta-Empfindung calculation according to the CIE, '94 \"\"\"\n delta_l = lab1[0] - lab2[0]\n c1 
= math.sqrt(lab1[1]**2 + lab1[2]**2)\n c2 = math.sqrt(lab2[1]**2 + lab2[2]**2)\n delta_c = c1 - c2\n delta_h = math.sqrt(cie76_deltaE(lab1, lab2)**2 - delta_l**2 - delta_c**2)\n s_l = 1\n s_c = 1 + (0.045 * c1)\n s_h = 1 + (0.015 * c1)\n\n return math.sqrt((delta_l / s_l)**2 + (delta_c / s_c)**2 + (delta_h/s_h)**2)\n\ndef rgbdiff(rgb1, rgb2):\n \"\"\" Calculates RGB color \"distance\", using L* a* b* conversions \"\"\"\n return cie76_deltaE(rgb2lab(rgb1), rgb2lab(rgb2))\n" }, { "alpha_fraction": 0.5584608912467957, "alphanum_fraction": 0.5799319744110107, "avg_line_length": 32.12676239013672, "blob_id": "8936732b1251a8d186e9cc12ae0e84ffe617d019", "content_id": "34b4be1601b5d6dcff2139efb54e6d4961967b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4704, "license_type": "no_license", "max_line_length": 100, "num_lines": 142, "path": "/square.py", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "import numpy\nfrom geometry import Area, Vector2, get_slice\nfrom PIL import Image\nimport sys\n\nfrom math import floor, ceil\nfrom fractions import gcd\nfrom functools import reduce, partial, lru_cache\nfrom operator import mul\n\nfrom typing import Optional, Callable\nimport ctypes\n\nsq_c = ctypes.CDLL(\"./square.so\")\nsq_c.rgbdiff_p.restype = ctypes.c_double\nsq_c.rgbdiff_p.argtypes = [ctypes.c_double] * 6\n\ndef rgbdiff(arr1: numpy.ndarray, arr2: numpy.ndarray) -> float:\n \"\"\" Get the color difference for two RGB colors. \"\"\"\n return sq_c.rgbdiff_p(arr1[0], arr1[1], arr1[2],\n arr2[0], arr2[1], arr2[2])\n\ndef remove_array(L,arr):\n ind = 0\n size = len(L)\n while ind != size and not (id(L[ind]) == id(arr)):\n ind += 1\n if ind != size:\n L.pop(ind)\n else:\n raise ValueError('array not found in list.')\n\ndef sq(data):\n \"\"\"Split an image into squares. If the image's greatest common divisor is\n <64, just quarter it. If x == y, also quarter. 
Returns them in some order,\n can't remember which (figure it out yourself)\"\"\"\n data_size = Vector2(*data.shape[:2])\n edge_length = gcd(data_size.x, data_size.y)\n\n if (data_size.x == data_size.y):\n return quarter(data)\n\n if (edge_length < 64):\n return quarter(data)\n\n n_squares = Vector2(data_size.x / edge_length, data_size.y / edge_length)\n\n slices = []\n\n for i in range(int(n_squares.y)):\n for j in range(int(n_squares.x)):\n print(\"Slicing from [%d:%d] (x) and [%d:%d] (y)\" % (i*edge_length,\n i*edge_length + edge_length, j * edge_length, j*edge_length+edge_length))\n slices.append(get_slice(data, Area(i * edge_length, j * edge_length,\n edge_length, edge_length)))\n\n return slices\n\n\n\ndef quarter(data):\n \"\"\"Split an image into four quarters. Return them in order topleft, topright, botleft, botright.\n If the image has an odd number of pixels on an axis, have one half be +0.5px, the other -0.5px.\n \"\"\"\n\n size = Vector2(*data.shape[:2])\n x_pair = [size.x / 2, size.x / 2]\n y_pair = [size.y / 2, size.y / 2]\n\n if (size.x % 2 == 1):\n x_pair[0] = floor(x_pair[0])\n x_pair[1] = ceil(x_pair[1])\n\n if (size.y % 2 == 1):\n y_pair[0] = floor(y_pair[0])\n y_pair[1] = ceil(y_pair[1])\n\n return [data[:y_pair[0],:x_pair[1]],\n data[:y_pair[0],x_pair[0]:],\n data[y_pair[1]:,:x_pair[1]],\n data[y_pair[1]:,x_pair[0]:]]\ndef process_area(thresh, area):\n \"\"\"Process an Area. 
Returns True if the area's max_deviation is < thresh,\n False otherwise.\"\"\"\n if 0 in area.shape:\n return [True, numpy.array([0,0,0])]\n average = area.mean(0).mean(0)\n pixel_list = area.reshape(reduce(mul, area.shape[:2]), 3)\n max_deviation = max(map(partial(rgbdiff, average), pixel_list))\n\n if (max_deviation > thresh):\n return [False, average]\n\n return [True, average]\n\ndef calculate_percentage(id, a, part):\n total = id.size\n return 100 - ((part / total) * 100.0)\n\ndef transform_image(input, output, thresh):\n original_image = Image.open(input)\n\n # When indexing, instead of doing [x,y], remember to do [y,x]\n imagedata = numpy.array(original_image,dtype=numpy.float64)\n imagedata /= 255.0\n\n areas = sq(imagedata)\n areas_size = imagedata.size\n\n while areas:\n for area in areas:\n assert area.base is imagedata\n area_done, average = process_area(thresh, area)\n if area.shape[0] < 3 or area.shape[1] < 3:\n area_done = True\n if (area_done):\n print(\"Finished area with size: (%d, %d)\\t\\t%.2f\"\n % (*area.shape[:2], calculate_percentage(imagedata,\n areas, areas_size)))\n remove_array(areas, area)\n areas_size -= area.size\n numpy.copyto(area, numpy.full(area.shape, average, dtype=numpy.float64))\n else:\n print(\"Splitting area with size: (%d, %d)\\t\\t%.2f\"\n % (*area.shape[:2], calculate_percentage(imagedata,\n areas, areas_size)))\n remove_array(areas, area)\n for q in quarter(area):\n areas.append(q)\n\n new_image = Image.fromarray((imagedata * 255).round().astype(numpy.uint8))\n new_image.save(output)\n\n\nif __name__ == \"__main__\":\n print(\"Square, version 0 - (c) 2015 Sam van Kampen\")\n print(\"Licensed under the GNU GPL version 3.0\")\n\n if len(sys.argv) == 1:\n print(\"Usage: sq <input> <output> [delta-e threshold]\")\n else:\n transform_image(sys.argv[1], sys.argv[2], float(sys.argv[3]))\n" }, { "alpha_fraction": 0.44172146916389465, "alphanum_fraction": 0.560669481754303, "avg_line_length": 22.23611068725586, "blob_id": 
"3fa71a3ae4fc336ef94f797d40b00aa4bd04e4c5", "content_id": "7bf40c3c3bfdfc3cc0fdcd3fcabe88949e4b11c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 96, "num_lines": 72, "path": "/square.c", "repo_name": "svkampen/square", "src_encoding": "UTF-8", "text": "#include <math.h>\n#include <stdlib.h>\n#include <stdio.h>\n\ndouble *xyz2lab(double arr[]) {\n\tdouble ref_x = 95.047;\n\tdouble ref_y = 100.000;\n\tdouble ref_z = 108.883;\n\n\tarr[0] /= ref_x; arr[1] /= ref_y; arr[2] /= ref_z;\n\n\tfor (int n = 0; n < 3; n++) {\n\t\tif (arr[n] > 0.008856)\n\t\t\tarr[n] = cbrtl(arr[n]);\n\t\telse\n\t\t\tarr[n] = (7.787 * arr[n]) + (16 / 116.0);\n\t}\n\n\tdouble *lab = malloc(3 * sizeof(double));\n\tlab[0] = (116.0 * arr[1]) - 16;\n\tlab[1] = 500.0 * (arr[0] - arr[1]);\n\tlab[2] = 200.0 * (arr[1] - arr[2]);\n\n\treturn lab;\n}\n\ndouble *rgb2xyz(double arr[]) {\n\tfor (int n = 0; n < 3; n++) {\n\t\tif (arr[n] > 0.04045)\n\t\t\tarr[n] = pow((arr[n] + 0.055) / 1.055, 2.4);\n\t\telse\n\t\t\tarr[n] /= 12.92;\n\t}\n\n\tfor (int n = 0; n < 3; n++) {\n\t\tarr[n] *= 100;\n\t}\n\n\tdouble *xyz = malloc(3 * sizeof(double));\n\txyz[0] = arr[0] * 0.4124 + arr[1] * 0.3576 + arr[2] * 0.1805;\n\txyz[1] = arr[0] * 0.2126 + arr[1] * 0.7152 + arr[2] * 0.0722;\n\txyz[2] = arr[0] * 0.0193 + arr[1] * 0.1192 + arr[2] * 0.9505;\n\n\treturn xyz;\n}\n\ndouble *rgb2lab(double arr[]) {\n\tdouble *xyz = rgb2xyz(arr);\n\tdouble *lab = xyz2lab(xyz);\n\tfree(xyz);\n\treturn lab;\n}\n\ndouble edistance(double lab1[], double lab2[]) {\n\treturn sqrt(pow(lab2[0] - lab1[0], 2) + pow(lab2[1] - lab1[1], 2) + pow(lab2[2] - lab1[2], 2));\n}\n\ndouble rgbdiff(double rgb1[], double rgb2[]) {\n\tdouble *lab1 = rgb2lab(rgb1);\n\tdouble *lab2 = rgb2lab(rgb2);\n\tdouble e = edistance(lab1, lab2);\n\tfree(lab1);\n\tfree(lab2);\n\treturn e;\n}\n\ndouble rgbdiff_p(double rgb1_r, double rgb1_g,double 
rgb1_b,\n\t\t\t\tdouble rgb2_r, double rgb2_g, double rgb2_b) {\n\tdouble rgb1[3] = {rgb1_r, rgb1_g, rgb1_b};\n\tdouble rgb2[3] = {rgb2_r, rgb2_g, rgb2_b};\n\treturn rgbdiff(rgb1, rgb2);\n}\n" } ]
6
johnsiwicki/drumminhands_photobooth
https://github.com/johnsiwicki/drumminhands_photobooth
05cefeb923a3883d1b00fbd31562a890edcba227
ca5773cdab0aa8036638f02e98f7fb71bc372f47
5ec08e35651ad4597eeea2550c54695c1dfd50fd
refs/heads/master
2021-01-18T22:45:00.395886
2015-08-30T20:57:12
2015-08-30T20:57:12
41,642,659
0
0
null
2015-08-30T20:50:08
2015-08-26T16:03:31
2015-03-29T02:22:41
null
[ { "alpha_fraction": 0.7584158182144165, "alphanum_fraction": 0.7584158182144165, "avg_line_length": 62.125, "blob_id": "3592afce13029bd1a9b6d94671a3f13662bc982b", "content_id": "65e950dac0f9433816f0ca09867ad73d4c3b1614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 87, "num_lines": 8, "path": "/config.py", "repo_name": "johnsiwicki/drumminhands_photobooth", "src_encoding": "UTF-8", "text": "# Copy this file to config.py and replace the values with your information\n# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info\nconsumer_key='nlI5Wy0IZGOXqnh2uImDhDq1ykMtC0szYxGgkxije0GaSl24PN' #replace with your key\nconsumer_secret='YlszMjQpOec4mVDey8nH6sY6UGNCsQKerO8ADN4zapk0ZAbUiM' #replace with your secret code\noath_token='nlI5Wy0IZGOXqnh2uImDhDq1ykMtC0szYxGgkxije0GaSl24PN' #replace with you oath token\noath_secret='YlszMjQpOec4mVDey8nH6sY6UGNCsQKerO8ADN4zapk0ZAbUiM' #replace with your oath secret code\ntumblr_blog = 'Photo Booth' # replace with your tumblr account name without .tumblr.com\nfile_path = '/home/pi/photobooth/' # path to save images\n" } ]
1
koldunchik/own-agent-open
https://github.com/koldunchik/own-agent-open
2cddebebb3ddb1af024fe7fbbc51796cfab785e3
e138ba823b2c27ab583f18b35fa74843e3990975
5d5508b5c129da23c47f523b94b388442c43424b
refs/heads/master
2021-08-23T06:01:57.613157
2017-12-03T19:52:47
2017-12-03T19:52:47
112,958,153
0
0
null
2017-12-03T19:47:06
2017-12-02T06:34:08
2017-12-03T19:39:09
null
[ { "alpha_fraction": 0.6450556516647339, "alphanum_fraction": 0.647893488407135, "avg_line_length": 33.443607330322266, "blob_id": "1f29472c1923c66df08613c7c7bbf8907ed18a05", "content_id": "e4c755b07191080634528d93174d903295fff420", "detected_licenses": [ "LicenseRef-scancode-other-permissive" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4581, "license_type": "permissive", "max_line_length": 114, "num_lines": 133, "path": "/agents_platform/hello_world_agent.py", "repo_name": "koldunchik/own-agent-open", "src_encoding": "UTF-8", "text": "import json\nimport re\nimport threading\nimport time\nimport traceback\n\nimport websocket\n\nimport logger\nimport student\nimport teacher\n\nfrom own_adapter.agent import Agent\nfrom own_adapter.board import Board\nfrom own_adapter.element import Element\nfrom own_adapter.platform_access import PlatformAccess, get_agent\n\ndef periodical_update():\n \"\"\"Does periodical work with a predefined time interval\"\"\"\n time_interval = 86400\n\n while True:\n time.sleep(time_interval)\n logger.info('helloworld', 'Daily news update is done.')\n\n\ndef process_added_element(message_dict):\n element_caption = message_dict['newCaption']\n if re.match(pattern='@ta:.+', string=element_caption):\n student.get_new_assignment(message_dict)\n\n\ndef process_added_file(message_dict):\n file_link = message_dict['path']\n element_link = '/'.join(file_link.split('/')[:-2])\n board_link = '/'.join(file_link.split('/')[:-4])\n agent = get_agent()\n board = Board.get_board_by_id(board_link, agent.get_platform_access(), need_name=False)\n element = Element.get_element_by_id(element_link, agent.get_platform_access(), board)\n caption = element.get_name()\n\n added_file = None\n for file in element.get_files():\n platform_url = agent.get_platform_access().get_platform_url()\n if file.get_url()[len(platform_url):] == file_link:\n added_file = file\n break;\n\n logger.debug('helloworld', caption)\n if 
re.match(pattern='@ta_assignment:.+', string=caption) and len(element.get_files()) == 1:\n teacher.send_new_assignment(agent, board, element, added_file)\n if re.match(pattern='@assignment:.+', string=caption) and len(element.get_files()) > 1:\n student.send_solution(agent, board, element, added_file)\n if re.match(pattern='@ta_assignment:.+', string=caption) and len(element.get_files()) > 1:\n teacher.recieve_solution(agent, board, element, added_file)\n if re.match(pattern='@ta_assignment_graded:.+', string=caption):\n teacher.send_grades(agent, board, element, added_file)\n\n\n\n\ndef on_websocket_message(ws, message):\n \"\"\"Processes websocket messages\"\"\"\n message_dict = json.loads(message)\n content_type = message_dict['contentType']\n message_type = content_type.replace('application/vnd.uberblik.', '')\n\n logger.debug('helloworld', message)\n\n if message_type == 'liveUpdateElementCaptionEdited+json':\n process_added_element(message_dict)\n if message_type == 'liveUpdateFileAdded+json':\n process_added_file(message_dict)\n\n\ndef on_websocket_error(ws, error):\n \"\"\"Logs websocket errors\"\"\"\n logger.error('helloworld', error)\n\n\ndef on_websocket_open(ws):\n \"\"\"Logs websocket openings\"\"\"\n logger.info('helloworld', 'Websocket is open')\n\n\ndef on_websocket_close(ws):\n \"\"\"Logs websocket closings\"\"\"\n logger.info('helloworld', 'Websocket is closed')\n\n\ndef open_websocket():\n \"\"\"Opens a websocket to receive messages from the boards about events\"\"\"\n agent = get_agent()\n # getting the service url without protocol name\n platform_url_no_protocol = agent.get_platform_access().get_platform_url().split('://')[1]\n access_token = agent.get_platform_access().get_access_token()\n url = 'ws://{}/opensocket?token={}'.format(platform_url_no_protocol, access_token)\n\n ws = websocket.WebSocketApp(url,\n on_message=on_websocket_message,\n on_error=on_websocket_error,\n on_open=on_websocket_open,\n on_close=on_websocket_close)\n 
ws.run_forever()\n\n\ndef run():\n websocket_thread = None\n updater_thread = None\n\n while True:\n # opening a websocket for catching server messages\n if websocket_thread is None or not websocket_thread.is_alive():\n try:\n websocket_thread = threading.Thread(target=open_websocket)\n websocket_thread.start()\n except Exception as e:\n logger.exception('helloworld', 'Could not open a websocket. Exception message: {}'.format(str(e)))\n\n # periodical updates\n if updater_thread is None or not updater_thread.is_alive():\n try:\n updater_thread = threading.Thread(target=periodical_update)\n updater_thread.start()\n except Exception as e:\n logger.exception('helloworld', 'Could not start updater. Exception message: {}'.format(str(e)))\n\n # wait until next check\n time.sleep(10)\n\n\nif __name__ == '__main__':\n run()\n" }, { "alpha_fraction": 0.6443271636962891, "alphanum_fraction": 0.6464380025863647, "avg_line_length": 31.13559341430664, "blob_id": "fb0d69274d422c336e7bb6eacda11b03eb6cbe5c", "content_id": "0012e52ffa0ac6a6bc5a27c2e74b10e688381cfb", "detected_licenses": [ "LicenseRef-scancode-other-permissive" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1895, "license_type": "permissive", "max_line_length": 98, "num_lines": 59, "path": "/agents_platform/student.py", "repo_name": "koldunchik/own-agent-open", "src_encoding": "UTF-8", "text": "import json\nimport re\nimport threading\nimport time\nimport traceback\nimport urllib\n\nimport websocket\n\nimport logger\nfrom own_adapter.agent import Agent\nfrom own_adapter.board import Board\nfrom own_adapter.element import Element\nfrom own_adapter.platform_access import PlatformAccess, get_agent\nfrom spell_check import addhints\n\ndef send_solution(agent, student_board, student_element, solution_file):\n logger.debug('helloworld', 'student sending solution')\n ta_element = None\n ta_board = None\n for board in agent.get_boards():\n for element in board.get_elements():\n if 
element.get_name() == '@ta_' + student_element.get_name()[1:]:\n if ta_element is not None:\n logger.error('helloworld', 'more than one ta found')\n ta_element = element\n ta_board = board\n break\n\n #TODO change to getting board creator name\n student_name = 'RustamGafarov' #student_board.get_name()\n file_link = agent.get_platform_access().get_platform_url() + solution_file.get_download_link()\n downloaded_file = urllib.request.urlopen(file_link)\n to_send = downloaded_file\n print(solution_file.get_name())\n\n g = None\n if solution_file.get_name()[-4:] == '.txt':\n text = str(downloaded_file.read())\n hints = addhints(text[2:-3])\n tmp_file = '/tmp/' + student_name + '.txt'\n f = open(tmp_file, 'w')\n f.write(hints)\n f.close()\n g = open(tmp_file)\n to_send = g\n\n\n logger.debug('helloworld', 'copypaste the solution')\n ta_element.put_file(solution_file.get_name(), to_send)\n\n if g is not None:\n g.close()\n\n message_for_ta = \"Student \" + student_name + \" just sent his solution\"\n ta_board.put_message(message_for_ta)\n\n message_for_student = \"Assignment sucessfully sent\"\n student_board.put_message(message_for_student)" }, { "alpha_fraction": 0.5866336822509766, "alphanum_fraction": 0.6056105494499207, "avg_line_length": 27.880952835083008, "blob_id": "37d9b88917ffd6196d055eda20e2b03c738eb63e", "content_id": "8e1123d023149af0e397caf225d62e3bcfb0c6c1", "detected_licenses": [ "LicenseRef-scancode-other-permissive" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/agents_platform/spell_check.py", "repo_name": "koldunchik/own-agent-open", "src_encoding": "UTF-8", "text": "import http.client, urllib, json, requests, json, string\nfrom pprint import pprint\n\ninput = 'Hllo wrld! Wlcome tt owr exibition. 
Just gimme som bad dada'\n\ndef addhints( input ):\n\n text = {'text' : input}\n key = 'c8453bb975854d9a9b7ed362a3369a1b'\n host = 'https://api.cognitive.microsoft.com'\n path = '/bing/v7.0/spellcheck'\n params = '?mkt=en-us&mode=proof&text=' + urllib.parse.urlencode(text)\n\n headers = {'Ocp-Apim-Subscription-Key': key,\n 'Content-Type': 'application/x-www-form-urlencoded'}\n\n r = requests.get(host + path + params, headers=headers);\n jsont = json.loads(r.text)\n out = text['text']\n words = out.split()\n\n s = {}\n for token in jsont['flaggedTokens']:\n hint = '['\n hint += \"/\".join(map(lambda x: x['suggestion'], token['suggestions']))\n hint += ']'\n s[token['token']] = hint\n\n output = \"\"\n translator = str.maketrans('', '', string.punctuation)\n for word in words:\n output += word\n trimmed_word = word.translate(translator)\n if s.get(trimmed_word) != None:\n output += s.get(trimmed_word)\n output += \" \"\n\n return output\n\n\nprint (\"in:\" + input)\nprint (\"out:\" + addhints(input))" }, { "alpha_fraction": 0.6435490250587463, "alphanum_fraction": 0.6450988054275513, "avg_line_length": 29.738094329833984, "blob_id": "635f4ccd81da62def3b8f96ce170879404eb1e69", "content_id": "d7f2e010809b2bec2747a8591821875f9f6b1d11", "detected_licenses": [ "LicenseRef-scancode-other-permissive" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2581, "license_type": "permissive", "max_line_length": 100, "num_lines": 84, "path": "/agents_platform/teacher.py", "repo_name": "koldunchik/own-agent-open", "src_encoding": "UTF-8", "text": "import json\nimport re\nimport threading\nimport time\nimport traceback\nimport urllib\n\nimport websocket\n\nimport logger\nfrom own_adapter.agent import Agent\nfrom own_adapter.board import Board\nfrom own_adapter.element import Element\nfrom own_adapter.platform_access import PlatformAccess, get_agent\n\n\ndef __do_something(element):\n \"\"\"Write your code here\"\"\"\n\n # examples:\n # put a message 
to a board\n message = 'Hello world!'\n element.get_board().put_message(message)\n\n # put a URL to an element\n url = 'https://www.own.space/'\n element.put_link(url)\n\n\ndef check_grammar(file):\n #TODO implement grammar checking\n path_to_corrected_file = \"\"\n return path_to_corrected_file\n\n\n\ndef send_new_assignment(agent, sender_board, assignment_element, assignment_file):\n logger.debug('helloworld', 'teacher sending an assignment')\n if assignment_element is None:\n logger.debug('helloworld', 'element is null')\n return\n\n if assignment_file is None:\n logger.debug('helloworld', 'file is null')\n return\n\n file_link = agent.get_platform_access().get_platform_url() + assignment_file.get_download_link()\n downloaded_file = urllib.request.urlopen(file_link)\n\n all_boards = agent.get_boards()\n for board in all_boards:\n if board.get_id() != sender_board.get_id():\n logger.debug('helloworld', 'teacher sending an assignment to ' + board.get_name())\n element_name = '@' + assignment_element.get_name()[4:]\n\n matrix = board.get_elements_matrix()\n for pos_y, pos_x in ((y, x) for y in range(len(matrix)) for x in range(len(matrix[y]))):\n if matrix[pos_y][pos_x] == 0:\n # TODO: remove hardcoded string\n board.add_element(pos_x + 1, pos_y + 1, caption=element_name)\n break\n\n for element in board.get_elements():\n if element.get_name() == element_name:\n logger.debug('helloworld', 'copypaste the assignment')\n element.put_file(assignment_file.get_name(), downloaded_file)\n\n message_for_student = \"You have a new assignment.\"\n board.put_message(message_for_student)\n\n\n\n message = 'Assignment ' + assignment_element.get_name() + ' have been sent'\n sender_board.put_message(message)\n\n\ndef recieve_solution(agent, board, element, file):\n logger.debug('helloworld', 'teacher recieving solution')\n pass\n\n\ndef send_grades(agent, board, element, file):\n logger.debug('helloworld', 'teacher sending grades')\n pass" }, { "alpha_fraction": 
0.5378821492195129, "alphanum_fraction": 0.5463004112243652, "avg_line_length": 29.917808532714844, "blob_id": "2b325b949bb0f3540c7f01b8b5d1ae4679a3a6f9", "content_id": "9081d01f58be23c8c8ea5caca164b84572fcb9c1", "detected_licenses": [ "LicenseRef-scancode-other-permissive" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2257, "license_type": "permissive", "max_line_length": 122, "num_lines": 73, "path": "/agents_platform/notifications_agent.py", "repo_name": "koldunchik/own-agent-open", "src_encoding": "UTF-8", "text": "import json\nimport re\nimport threading\nimport time\nimport traceback\n\nimport logger, pprint\nfrom own_adapter.agent import Agent\nfrom own_adapter.board import Board\nfrom own_adapter.element import Element\nfrom own_adapter.platform_access import PlatformAccess, get_agent\nfrom datetime import datetime, timedelta\n\n# Notify X minutes before deadline\nDEADLINE_MINUTES = 30\n\n# Notify Y days before deadline\nDEADLINE_DAYS = 1\n\ndef run():\n notification_status = {}\n\n while True:\n # periodical updates\n\n agent = get_agent()\n platform_access = agent.get_platform_access()\n\n boards = agent.get_boards()\n\n time.sleep(3)\n\n for board in boards:\n name = board.get_name()\n print (name)\n if name.lower() != 'Student Board'.lower():\n continue;\n elements = board.get_elements()\n for element in elements:\n ename = element.get_name()\n if not ename.startswith(\"@assignment\"):\n continue\n\n print (\"ename:\" + ename)\n m = re.search('\\d{2,4}-\\d{1,2}-\\d{1,2}\\s+\\d{1,2}\\:\\d{1,2}', ename)\n if not m:\n print (\"notificator:invalid format \")\n continue\n\n alert_date = m.group(0)\n datetime_minutes = datetime.strptime(alert_date, '%Y-%m-%d %H:%M') - timedelta(minutes = DEADLINE_MINUTES)\n datetime_days = datetime.strptime(alert_date, '%Y-%m-%d %H:%M') - timedelta(days = DEADLINE_DAYS)\n\n if datetime.now() >= datetime_minutes or datetime.now() >= datetime_days:\n nstatus = 
notification_status.get(ename)\n if nstatus != None:\n if nstatus >= datetime_minutes:\n continue\n\n print (datetime_minutes)\n print (datetime_days)\n board.put_message(\"Attention! Deadline is soon for \" + ename + \" Think about it!\")\n notification_status[ename] = datetime.now()\n print (\"notification sent\")\n\n print (\"round\")\n\n # wait until next check\n time.sleep(1000)\n\n\nif __name__ == '__main__':\n run()\n" } ]
5
cosinusSymfony/Symfony
https://github.com/cosinusSymfony/Symfony
05612cb3c4f450f62e18ed8ca3ab4351aa926197
872fc7a5c2ea147933181900ba62b94b1521a076
2173fa44fbede243b12519fa7ef0b4258e943166
refs/heads/main
2023-09-04T11:54:29.824205
2021-10-13T01:01:36
2021-10-13T01:01:36
416,308,370
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7485029697418213, "alphanum_fraction": 0.7485029697418213, "avg_line_length": 22.85714340209961, "blob_id": "b00be9e7c415854eea3c5f0f4a9553499a976279", "content_id": "bf9d87113d53a6a52819e5b33c7514ffa4f85a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 55, "num_lines": 7, "path": "/test1.py", "repo_name": "cosinusSymfony/Symfony", "src_encoding": "UTF-8", "text": "import requests\n\nr = requests.get('https://python.org')\nprint(r.status_code)\nprint(r.content)\nprint(r.cookies)\nprint(b'Python is a programing language ' in r.content)\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 15.5, "blob_id": "75fd55271faa0f2dfc715b62d7120cc46226b39b", "content_id": "0ff1dc02cfd65a12aa85b880748d058113c9ec5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "cosinusSymfony/Symfony", "src_encoding": "UTF-8", "text": "# Symfony\nAll data to create App \n" } ]
2
apatten001/client_tracker
https://github.com/apatten001/client_tracker
ef08117ce4722ae0c260cce67f67f5e9cc8e2c8c
4428761acc3b7e4a8de08f4bd34fcc315b54fc66
689f86a7aa854b2e6efce0fd2d2d5de1ed16dd83
refs/heads/master
2020-04-08T06:13:18.579421
2018-11-29T20:30:07
2018-11-29T20:30:07
159,089,514
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6957295536994934, "alphanum_fraction": 0.7046263217926025, "avg_line_length": 28.526315689086914, "blob_id": "f5cbca7b9a60f882e1103cf6eefe9a79214e7477", "content_id": "6d48e6ef2cc5e08bec963fac9d3fda00dd01dcd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/measurement/models.py", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom client.models import Client\n\n# Create your models here.\n\n\nclass Measurement(models.Model):\n\n profile = models.ForeignKey(Client, on_delete=models.CASCADE)\n current_weight = models.FloatField(default=0)\n goal_weight = models.FloatField(default=0)\n height = models.FloatField()\n body_fat = models.FloatField()\n plank = models.CharField(max_length=120)\n date = models.DateField(auto_now_add=True)\n goal_date = models.DateField()\n\n def __str__(self):\n return str(self.profile) + \"'s measurements\"\n\n" }, { "alpha_fraction": 0.7620320916175842, "alphanum_fraction": 0.7620320916175842, "avg_line_length": 23.53333282470703, "blob_id": "343e02d601fd05b772336cc639be7e9ef99c29e0", "content_id": "1feb7facdcf3821f2e76031776c495799b7dc006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 374, "license_type": "no_license", "max_line_length": 65, "num_lines": 15, "path": "/README.md", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "# Client Tracker\n\nClient Tracker is a web app designed to store personal \ntrainers clients contact information, as well as\ngoals and measurements. 
It has a simple *dashboard* that displays\na clients info for easy recall access.\n\n\n## A list of things tracked\n- Clients Name\n- Current Weight\n- Goal Weight\n- body fat percentage\n- Birthday (for reminders)\n- Phone number\n\n\n " }, { "alpha_fraction": 0.6727688908576965, "alphanum_fraction": 0.6910755038261414, "avg_line_length": 28, "blob_id": "a524c3685bdd07289a96bf0da2b37c736a1b60ae", "content_id": "3c155da270502a62989e49d81bb7d4c3f10d96e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 62, "num_lines": 15, "path": "/client/models.py", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass Client(models.Model):\n\n first_name = models.CharField(max_length=120, blank=False)\n last_name = models.CharField(max_length=120, blank=False)\n Birthday = models.DateField()\n phone_number = models.CharField(max_length=30, blank=True)\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n" }, { "alpha_fraction": 0.5414798259735107, "alphanum_fraction": 0.5661435127258301, "avg_line_length": 30.85714340209961, "blob_id": "7a1b20a30851a4fc5d5e23de9e13060e9cd8f823", "content_id": "9dae39901ee5c76545bf82c58840627538337f5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/measurement/migrations/0001_initial.py", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-24 21:42\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('client', '0002_client_active'),\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Measurement',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('weight', models.FloatField()),\n ('height', models.FloatField()),\n ('body_fat', models.FloatField()),\n ('plank', models.CharField(max_length=120)),\n ('date', models.DateField(auto_now_add=True)),\n ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='client.Client')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6929460763931274, "alphanum_fraction": 0.6929460763931274, "avg_line_length": 25.88888931274414, "blob_id": "cfbd4ebd72b7f754abe9379ec5a07d98f01aef60", "content_id": "5134c94fc7b3edd8eceee9f7b573a84a3e51a5d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 67, "num_lines": 9, "path": "/client/urls.py", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import client_list_view, ClientProfileView\n\napp_name = 'client'\nurlpatterns = [\n path('list/', client_list_view, name='list'),\n path('<int:pk>/', ClientProfileView.as_view(), name='profile'),\n\n]" }, { "alpha_fraction": 0.7182080745697021, "alphanum_fraction": 0.7182080745697021, "avg_line_length": 31.761905670166016, "blob_id": "da03d6f4c768174146c4452b75a496ed7e332803", "content_id": "bf2451e13510b23dbc0e51ccf225f3d10d7586db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/client/views.py", "repo_name": "apatten001/client_tracker", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views.generic import DetailView\nfrom .models import Client\nfrom measurement.models import Measurement\n# Create your views here.\n\n\ndef client_list_view(request):\n clients = 
Client.objects.all()\n return render(request, 'client/home.html', {'clients': clients})\n\n\nclass ClientProfileView(DetailView):\n # page_content = 'returns a profile of a specific client'\n template_name = 'client/profile.html'\n queryset = Measurement.objects.all()\n\n def get_context_data(self, **kwargs):\n context = super(ClientProfileView, self).get_context_data(**kwargs)\n context['client'] = Client.objects.all()\n return context\n\n\n\n\n" } ]
6
PaDS5/DataVisualisationProject
https://github.com/PaDS5/DataVisualisationProject
39888edbd84622dd4e6e3099366da4fa5d675e44
7bfca11b20b39a5dce6d623825b7703734ae29b2
39895d1a2c599e18a91eda9b8ecfbde925ae5240
refs/heads/master
2023-03-29T03:25:40.081759
2021-04-02T11:03:50
2021-04-02T11:03:50
347,109,151
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7586637735366821, "alphanum_fraction": 0.7658445239067078, "avg_line_length": 68.65217590332031, "blob_id": "07f9e81c094acf872a628d4cffa7ddd340b92425", "content_id": "58e71ca6c6168453275c7aac009c04f1014f7d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3205, "license_type": "no_license", "max_line_length": 351, "num_lines": 46, "path": "/README.md", "repo_name": "PaDS5/DataVisualisationProject", "src_encoding": "UTF-8", "text": "# COVID-19 in France : A first look at the global situation and vaccination process \n\n## Presentation of the datasets \n\n### COVID-19 dataset\n\n Concerning the COVID-19 dataset, we have observations since March 2020 up to today because the dataset is updated every day. We will explain after how we manage to get the correct dataset without downloading every version every day.\n Here, let's have a look at the dataset: \n\n\n![COVID-19 Dataset](images/covid_dataset_1.png)\n\n We see basics informations such as the department, the gender, the day and the then the four indicators of the situation: \n - hosp : Indicate the number of hospitalisations\n - rea : Incated the number of reanimation\n - rad : Indicate the number of people that got back home\n - dec : Indicate the number of deaths\n\n We have all the informations to have some visualisation. However, we will create new columns at some points to group the hospitalisations by date for example or other things like that. \n\n\n### Vaccination dataset\n\n For the vaccination dataset, we took the one provided by the government. Like the previous dataset, this one is also updated every day but we have less observations since the vaccination campaign started at the end of December 2020. \n\n![Vaccination dataset](images/vacci_dataset_1.png)\n\n Like before, we have some basics informations and now we have the numnber of doses administrated every day. 
We also have the cumulated number which will be useful for the representation. \n However, we see some changes in the organisation. This time, the dataset is ordered by department and not by date so we will have to do different operations on this dataset.\n\n## How we managed to get the latest dataset from the data.gouv website\n\n One of the difficulties for us was that the datasets are updated daily and so we need to get the latest updated one to have correct visualisation. \n To solve this problem, we used some NLP technique that we saw some weeks ago : BeautifulSoup library.\n With that, we can look for all links in the html page and get a list of it. To have smaller list of results, we just select links that have the word \"Tรฉlรฉcharger\". Even if the hour of publication is different, the order in the list is always the same. We just have to select the second link as it is the one that we want for the COVID-19 dataset. \n\n Concerning the vaccination dataset, the structure of the file was updated recently and so all the operations that we did on the dataset weren't working anymore so we decided to stick with the original dataset that we took, stopping at March, 2nd.\n\n\n## Dashboard structure\n\n Concerning the dahsboard, since we are working with two datasets, we will have two distinct visualisation. To get these 2 visualisations on the same dashboard, we decided to put two links on the dashboard, each one displaying one or the other visualisation. Here an image of the presentation page of the dashboard: \n\n![Presentation Page](images/dashboard_1.png)\n\n Here we see the two links to access the two visualisations. These links are permanent meaning that we can switch from one visualisation to another at any time." 
}, { "alpha_fraction": 0.6653465628623962, "alphanum_fraction": 0.6693069338798523, "avg_line_length": 45, "blob_id": "92d718fb70016eb21309eb05fd53f581303d0a6e", "content_id": "cd12919cde12ef0e4e884a61ded7f53c632eeba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 101, "num_lines": 11, "path": "/app.py", "repo_name": "PaDS5/DataVisualisationProject", "src_encoding": "UTF-8", "text": "import dash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n# meta_tags are required for the app layout to be mobile responsive\n\n#We define the app parameters like the .css file to use it when we run the dashboard\napp = dash.Dash(__name__,external_stylesheets=external_stylesheets,suppress_callback_exceptions=True,\n meta_tags=[{'name': 'viewport',\n 'content': 'width=device-width, initial-scale=1.0'}]\n )\nserver = app.server" }, { "alpha_fraction": 0.632446825504303, "alphanum_fraction": 0.651063859462738, "avg_line_length": 37.367347717285156, "blob_id": "9995057b0940280417261efbb57b55c212e10855", "content_id": "8f11f927b47c149fec8541a84911e260f77491da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1880, "license_type": "no_license", "max_line_length": 176, "num_lines": 49, "path": "/main.py", "repo_name": "PaDS5/DataVisualisationProject", "src_encoding": "UTF-8", "text": "import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Connect to main app.py file\nfrom app import app\nfrom app import server\n\n# Connect to your app pages\nfrom apps import covid, vacci\n\n#Here we will launch the app and define the layout. 
We have something to add, which is the links for the COVID 19 informations or the vaccination informations\n\n\napp.layout = html.Div([\n html.H1('Evolution of the COVID-19 situation in France', style={\"textAlign\": \"center\"}),\n dcc.Location(id='url', refresh=False),\n\n html.Div([\n html.Br(),\n ], className=\"three columns\"),\n\n #Here we defined links to click on to siplay COVID-19 informations or vaccination informations. We can switch from one to another at any moment\n\n html.Div([\n dcc.Link('Covid-19 in France',className=\"three columns\", href='/apps/covid', #We defined the links to display the COVID-19 layout here\n style={\"font\":\"bold 30px Arial\", \"text_decoration\":\"none\", \"padding\":\"2px 6px 2px 6px\", \"color\":\"#333333\", \"background-color\":\"EEEEEE\", \"textAlign\":\"center\"}),\n dcc.Link('Vaccination in France',className=\"three columns\", href='/apps/vacci',\n style={\"font\": \"bold 30px Arial\", \"text_decoration\": \"none\", \"padding\": \"2px 6px 2px 6px\",\n \"color\": \"#333333\", \"background-color\": \"EEEEEE\"}\n ),\n ], className=\"row\"),\n html.Div(id='page-content', children=[])\n])\n\n#We retrun the layouts based on what is asked by the user\[email protected](Output('page-content', 'children'),\n [Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/apps/covid':\n return covid.layout\n if pathname == '/apps/vacci':\n return vacci.layout\n else:\n return \"\"\n\n\nif __name__ == '__main__':\n app.run_server(debug=False)\n" }, { "alpha_fraction": 0.5520572066307068, "alphanum_fraction": 0.5720019936561584, "avg_line_length": 37.1483268737793, "blob_id": "1a34886f8e659d479da14d17a7702492feadee53", "content_id": "967406f29a3326b5798c173fc2b2ccab7dbaeab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7972, "license_type": "no_license", "max_line_length": 171, "num_lines": 209, "path": "/apps/vacci.py", "repo_name": 
"PaDS5/DataVisualisationProject", "src_encoding": "UTF-8", "text": "import dash as dash\nimport geojson as geojson\nfrom dash import Dash\nimport plotly.express as px\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport numpy as np\nfrom app import app\n\n### DISCLAIMER : A lot of code is similar that we used in the covid.py file for graphs. Only the dataset and de varialbes to display change.\n\n#Concerning the vaccination dataset, the recent changes made on the available dataset are not working with what we defined here so we kept the original dataset that we had\n\ndf = pd.read_csv(\"apps/vacsi-s-dep-2021-03-02-20h15.csv\", sep=\",\")\ndf = df.iloc[8:]\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n#app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n#server = app.server\n\ndf = df.rename(columns={'n_dose1': 'dose_1', 'n_dose2': 'dose_2', 'jour':'day', 'n_cum_dose1':'cumulated_dose_1', 'n_cum_dose2':'cumulated_dose_2', 'dep':'department'})\n\nliste_y = ['dose_1', 'dose_2', 'cumulated_dose_1', 'cumulated_dose_2']\nliste_x = ['day']\nliste_dep = ['department']\n\ndf_firstgraph = df[df['sexe'] == 0]\ndf_filtered = df[df['sexe'] != 0]\ndf_filtered['sexe_str'] = df_filtered['sexe'].apply(lambda x: \"Men\" if x == 1 else \"Women\")\n\ndf_new_filtered = df[df['sexe'] == 0].groupby('department')\n\nwith open('apps/departements.geojson') as file:\n geo = geojson.load(file)\n\n#This time for the map, we set the date to the last date in the dataset which is March, 1st since we took the dataset of March 2nd\n\ndf_map = df[(df['day'] == '2021-03-01') & (df['sexe'] == 0)]\ndf_map.drop(df_map.tail(1).index, inplace=True)\n\ndf_dep_reg = pd.read_csv('apps/departements-region.csv')\n\ndf_map['dep_name'] = df_dep_reg['dep_name'].to_numpy()\ndf_map['region_name'] = df_dep_reg['region_name'].to_numpy()\n\ndf_curr_situation_vac_hosp = 
df_map.groupby(['region_name'], as_index=False).max()\n\ndf_daily_vac = df.groupby(['day'], as_index=False).sum()\nfrance_map = px.choropleth(df_map, geojson=geo, locations='department', featureidkey=\"properties.code\", color='cumulated_dose_1',\n labels={'dc': 'Total vaccination'}, color_continuous_scale=\"Viridis\", scope=\"europe\",\n hover_name='dep_name', hover_data=['dose_1', 'dose_2'])\nfrance_map.update_geos(showcountries=False, showcoastlines=False, showland=False, fitbounds=\"locations\")\n# france_map.update_layout(margin={\"r\":50,\"t\":50,\"l\":50,\"b\":50})\nfrance_map.update_layout(width=1000, height=1000)\n\n#This time we define the graph to represent the vaccination per gender for first dose\n\nfig_vacci_gender = px.pie(df_filtered, values=\"dose_1\", names=\"sexe_str\", title=\"Vaccination 1 by Gender\")\nfig_vacci_gender.update_traces(textposition='outside',\n textinfo='percent+label',\n marker=dict(line=dict(color='#000000',\n width=2)),\n pull=[0.05, 0, 0.03],\n opacity=0.9,\n # rotation=180\n )\n\n\n#Graphs for total and daily administration of vaccine (by dose)\n\nfig_total_vac1 = px.scatter(df_daily_vac, x=\"day\", y='cumulated_dose_1',\n title=\"Evolution of the cumulated number of people vaccinated stage 1 in France\")\nfig_total_vac2 = px.scatter(df_daily_vac, x=\"day\", y='cumulated_dose_2',\n title=\"Evolution of the cumulated number of people vaccinated stage 2 in France\")\nfig_daily_vac1 = px.scatter(df_daily_vac, x=\"day\", y='dose_1',\n title=\"Evolution of the number of people vaccinated stage 1 in France each day\")\nfig_daily_vac2 = px.scatter(df_daily_vac, x=\"day\", y='dose_2',\n title=\"Evolution of the number of people vaccinated stage 2 in France each day\")\n\n#For the definition of the layout, we kept the same structure as the one for the covid information. 
We just change the graphs that are displayed.\n\nlayout = html.Div([\n html.H1('Covid Vaccination Data in France', style={\"textAlign\": \"center\"}),\n\n html.Div([\n html.Div([\n html.H4('Global evolution in France', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n html.P('Here we can choose the parameter that we want to see across time',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P('We can see the number of vaccination every day\\n',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.Div(dcc.Dropdown(\n id='yaxis-column23',\n options=[{'label': i, 'value': i} for i in liste_y],\n value='cumulated_dose_1'\n )),\n ], className=\"three columns\", style={\"padding\": \"3%\"}),\n\n html.Div([\n dcc.Graph(id='first_graph23', figure={})\n ], className=\"nine columns\")\n\n ], className='row', style={\"backgroundColor\": \"#313131\"}),\n\n html.Br(),\n html.Br(),\n\n html.Div([\n\n html.Div([\n dcc.Graph(\n id='graph123',\n figure=fig_total_vac1\n ),\n\n html.Br(),\n\n dcc.Graph(\n id='graph423',\n figure=fig_total_vac2),\n ], className='six columns'),\n\n html.Div([\n dcc.Graph(\n id='graph30000',\n figure=fig_daily_vac1\n ),\n\n html.Br(),\n\n dcc.Graph(\n id='graph20000',\n figure=fig_daily_vac2),\n ], className='six columns'),\n\n # dcc.Graph(id = 'third-graph', figure= fig_vacci_gender, className='three columns'),\n # dcc.Graph(id = 'fourth-graph', figure= fig_vacci_gender, className='three columns')\n ], className=\"row\", style={\"border\": \"solid\", \"padding\": \"2%\"}),\n\n html.Div([\n\n html.Div([\n html.Div([\n html.H2('Vaccination across Gender', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n\n html.P('Let\\'s have a look at the proportion of the first dose by gender',\n style={\"textAlign\": \"center\", \"color\": \"white\"}),\n html.Br(),\n html.Br(),\n\n ], className=\"six 
columns\", style={\"padding\": \"3%\"}),\n\n html.Div([\n dcc.Graph(id='pie_graph', figure=fig_vacci_gender)\n ], className=\"six columns\")\n\n ], className='row', style={\"backgroundColor\": \"#313131\"}),\n\n ], style={\"padding\": \"3%\", \"border\": \"solid\"}),\n\n html.Div([\n\n html.Div([\n dcc.Graph(\n id='graph823',\n figure=france_map),\n ], className=\"seven columns\", style={\"height\": \"100%\"}),\n\n html.Div([\n html.H4('Informations for each department', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n\n html.P('You can have a look on the situation of every department with the map',\n style={\"textAlign\": \"center\", \"color\": \"white\"}),\n html.Br(),\n html.Br(),\n\n ], className=\"five columns\", style={\"backgroundColor\": \"#313131\", \"padding\": \"3%\"}),\n\n ], className=\"row\", style={\"padding\": \"3%\", \"border\": \"solid\"}),\n\n\n], style={\"padding\": \"3%\"})\n\n\[email protected](\n Output('first_graph23', 'figure'),\n [Input(\"yaxis-column23\", \"value\")]\n)\ndef update_figure(yaxis_column_name):\n return px.scatter(\n df_firstgraph, x=df_firstgraph['day'], y=df_firstgraph[yaxis_column_name], color='department',\n render_mode=\"webgl\", title=\"Evolution of the COVID-19 vaccination in France\"\n )\n\n\n#if __name__ == '__main__':\n # app.run_server(debug=False)" }, { "alpha_fraction": 0.5801920890808105, "alphanum_fraction": 0.5905794501304626, "avg_line_length": 41.28453063964844, "blob_id": "ff0886df4231583a457287db65fabc1cb86e78ef", "content_id": "0f682d5c7dac7871f3ec27524fc79b5793e9615e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15311, "license_type": "no_license", "max_line_length": 191, "num_lines": 362, "path": "/apps/covid.py", "repo_name": "PaDS5/DataVisualisationProject", "src_encoding": "UTF-8", "text": "import dash as dash\nimport geojson as geojson\nfrom dash import Dash\nimport plotly.express as px\nimport 
dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nfrom app import app\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nimport requests\n\n#First, we get the current date to have graphs with the last informations\n\ncurrent_date = datetime.today() - timedelta(1)\ncurrent_date = current_date.strftime('%Y-%m-%d')\n\n#Code to get last updated dataset from the French government\n#To do this, we use BeautifulSoup to parse the website and find all links associated to the word \"Tรฉlรฉcharger\".\n# We saw that the order is always the same and so we just get the second link parsed that corresponds to the dataset that we want to use\n\npage = requests.get(\"https://www.data.gouv.fr/fr/datasets/donnees-hospitalieres-relatives-a-lepidemie-de-covid-19/\")\nsoup = BeautifulSoup(page.text, 'html.parser')\nlinks = soup.find_all('a', href = True, text = 'Tรฉlรฉcharger')\ndataset_link = links[1]['href']\n\nprint(\"Link dataset\", dataset_link) #We display the link to be sure to get the good dataset. Then we just proceed normally with a .csv file\n\n\ndf = pd.read_csv(dataset_link, sep=\";\")\ndf = df.rename(columns={'hosp': 'hospitalisations', 'rea': 'reanimation', 'jour':'day', 'rad':'return home', 'dc':'deaths', 'dep':'department'})\n\n#This external_stylesheets is the one that allow us to use something similar to Bootstrap for the disposition on the Dashboard\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n\n#These three lists will be used for the first graph displaying the global situation in France\n#We will have the choice to choose between the number of Hospitalisations, Reanimations, Home returns eand Deceased displayed in function of the day. 
Basically, it's just to show\n#the evolution of the pandemic in France since the beginning\nliste_y = ['hospitalisations', 'reanimation', 'return home', 'deaths']\nliste_x = ['day']\nliste_dep = ['department']\n\n#In the dataset, we add three values for the 'sexe' parameter : 0 for all, 1 for men and 2 for women. For cumulated values, we take only rows with the value 0 for 'sexe'\n\ndf_firstgraph = df[df['sexe'] == 0]\ndf_cumul_hosp = df_firstgraph\n\n#Now, we create two new columns, one for the cumulated number of hospitalisations and one for the cumulated number of reanimations.\n\ndf_cumul_hosp['cumulated hospitalisations'] = df_cumul_hosp.groupby(['day'], as_index=False)['hospitalisations'].cumsum()\ndf_cumul_hosp = df_cumul_hosp.groupby(['day'], as_index=False).max()\ndf_cumul_hosp['cumulated reanimation'] = df_cumul_hosp.groupby(['day'], as_index=False)['reanimation'].cumsum()\ndf_cumul_hosp = df_cumul_hosp.groupby(['day'], as_index=False).max()\n\ndf_filtered = df[df['sexe'] != 0]\ndf_filtered['sexe_str'] = df_filtered['sexe'].apply(lambda x: \"Men\" if x == 1 else \"Women\")\n\ndf_new_filtered = df[df['sexe'] == 0].groupby('department')\n\n\n\n\n#This part concerns the last part of the dashboard, everything around the France map that we use.\n\nwith open('apps/departements.geojson') as file:\n geo = geojson.load(file)\n\n#To get the current situation, we just get all the rows for the current date that we defined at the beginning.\n\ndf_map = df[(df['day'] == current_date) & (df['sexe'] == 0)]\ndf_dep_reg = pd.read_csv('apps/departements-region.csv')\n\n#We add the department and regions names.\n\ndf_map['dep_name'] = df_dep_reg['dep_name'].to_numpy()\ndf_map['region_name'] = df_dep_reg['region_name'].to_numpy()\ndf_curr_situation = df_map\n\n#We create two new columns to group numbers by regions\n\ndf_curr_situation['total_hosp_per_region'] = df_curr_situation.groupby(['region_name'], 
as_index=False)['hospitalisations'].cumsum()\ndf_curr_situation['total_rea_per_region'] = df_curr_situation.groupby(['region_name'], as_index=False)['reanimation'].cumsum()\n\ndf_curr_situation_fig_hosp = df_curr_situation.groupby(['region_name'], as_index=False).max()\n\n\n#The first graph will be defined thanks to the app callback at the end of the code. Right now, we will defined all the graphs that will be coming after the first one.\n#The order of definition here is the same order that we have in the final dashboard.\n\n#Those graphs will show the cumulated number of Hospitalisations and reanimations. We used the dataset that we create that contains these two columns to do that.\n\ntotal_hosp = px.scatter(df_cumul_hosp, x='day', y='cumulated hospitalisations',\n title=\"Evolution of the number of people hospitalized in France\")\ntotal_rea = px.scatter(df_cumul_hosp, x='day', y='cumulated reanimation',\n title=\"Evolution of the number of people in reanimation in France\")\n\n\n#The next three graphs wil be all informations based on the gender: Hospitalisations, Reanimations and Deceased\n\ngender_hosp = px.pie(df_filtered, values=\"hospitalisations\", names=\"sexe_str\", title=\"Hospitalisations by Gender\")\ngender_hosp.update_traces(textposition='outside',\n textinfo='percent+label',\n marker=dict(line=dict(color='#000000',\n width=2)),\n pull=[0.05, 0, 0.03],\n opacity=0.9,\n # rotation=180\n )\n\ngender_rea = px.pie(df_filtered, values=\"reanimation\", names=\"sexe_str\", title=\"Reanimation by Gender\")\ngender_rea.update_traces(textposition='outside',\n textinfo='percent+label',\n marker=dict(line=dict(color='#000000',\n width=2)),\n pull=[0.05, 0, 0.03],\n opacity=0.9,\n # rotation=180\n )\n\ngender_dec = px.pie(df_filtered, values=\"deaths\", names=\"sexe_str\", title=\"Deceased by Gender\")\ngender_dec.update_traces(textposition='outside',\n textinfo='percent+label',\n marker=dict(line=dict(color='#000000',\n width=2)),\n pull=[0.05, 0, 0.03],\n 
opacity=0.9,\n # rotation=180\n )\n\n#We create a map of France to get a better representaion of the current situation. The color will be the total number of deaths per department.\n#When using the map, each department will have the current number of people in hospitalisation and reanimation\n\nfrance_map = px.choropleth(df_map, geojson=geo, locations='department', featureidkey=\"properties.code\", color='deaths',\n labels={'deaths': 'Total deaths'}, color_continuous_scale=\"Viridis\", scope=\"europe\",\n hover_name='dep_name', hover_data=['hospitalisations', 'reanimation'])\nfrance_map.update_geos(showcountries=False, showcoastlines=False, showland=False, fitbounds=\"locations\")\n# france_map.update_layout(margin={\"r\":50,\"t\":50,\"l\":50,\"b\":50})\nfrance_map.update_layout(width=1000, height=1000)\n\n#Two graphs to see percentages of hospitalisation and reanimation in France for each region\n\nfig_region = px.pie(df_curr_situation_fig_hosp, values='total_hosp_per_region', names='region_name',\n title=\"Hospitalisation by region\")\nfig_region_rea = px.pie(df_curr_situation_fig_hosp, values='total_rea_per_region', names='region_name',\n title=\"Reanimation by region\")\n\n#Now that our graphs are defined, we can define the layout for the Dashboard\n\n#First, let's explain how the .css file that we used is working.\n#Like Bootstrap, this extension considers the screen as 12 columns of same length. 
We can structure the elements like we want by specifying the number of columns that we want for our element.\n#For example, we can decide to get the text on four columns and the graph on eight to have more space for the graph.\n# We can also divide the screen in three, four columns for each element, to display three graphs on the same 'row'.\n\nlayout = html.Div([\n html.H1('Covid Data in France', style={\"textAlign\": \"center\"}),\n\n #Here, we will have the first graph showing the evolution of the pandemic in France (you can choose to see number of hospitalisations, reanimations, home returns or deceased\n\n html.Div([\n html.Div([\n html.H4('Global evolution in France', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n html.P('Here we can choose the parameter that we want to see across time',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\n 'We can see the number of hospitalisations every day, number of reanimation and deaths (cumulated)\\n',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.Div(dcc.Dropdown(\n id='yaxis-column',\n options=[{'label': i, 'value': i} for i in liste_y],\n value='hospitalisations'\n )),\n ], className=\"three columns\", style={\"padding\": \"3%\"}), #We use a syntax similar to Bootstrap to structure the page. 
We can do this thanks to the external stylesheet that we dl before\n\n html.Div([\n dcc.Graph(id='first_graph', figure={})\n ], className=\"nine columns\")\n\n ], className='row', style={\"backgroundColor\": \"#313131\"}),\n\n html.Br(),\n html.Br(),\n\n html.Div([\n\n #We define here a column that will have the two graphs concerning the evolution of hospitalisations and reanimations\n\n html.Div([\n dcc.Graph(\n id='graph1',\n figure=total_hosp\n ),\n\n html.Br(),\n\n dcc.Graph(\n id='graph4',\n figure=total_rea),\n ], className='six columns'),\n\n html.Div([\n html.Br()\n ], className=\"one columns\"),\n\n #We define a zone of text to describe the graphs\n\n html.Div([\n html.H4('Hospitalisations and Reanimations', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n html.P(\n \"Now let's have a look at the total number of people who were hospitalized and who were in reanimation\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\n 'We can see the number of hospitalisations every day and number of reanimation (these results are cumulated)\\n',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n 
html.P(\"\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n\n ], className=\"six columns\", style={\"backgroundColor\": \"#313131\", \"padding\": \"3%\"}),\n\n # dcc.Graph(id = 'third-graph', figure= gender_hosp, className='three columns'),\n # dcc.Graph(id = 'fourth-graph', figure= gender_hosp, className='three columns')\n ], className=\"row\", style={\"border\": \"solid\", \"padding\": \"2%\"}),\n\n\n #New block to display the graphs concerning the gender informations\n\n html.Div([\n\n html.Div([\n html.H4('Analysis per Gender', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n html.Br(),\n html.P(\n \"Now let's have a look at the influence of the gender on the different aspects of the pandemic\",\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n html.P(\n 'We can see total number of hospitalisations, reanimations and deceased per gender to see the impact of it\\n',\n style={\"textAlign\": \"justify\", \"color\": \"white\"}),\n html.Br(),\n\n ], className=\"row\", style={\"backgroundColor\": \"#313131\", \"padding\": \"3%\"}),\n\n #We put the three graphs on the same row\n\n html.Div([\n html.Div([\n dcc.Graph(\n id='graph2',\n figure=gender_hosp\n ),\n ], className='four columns'),\n\n html.Div([\n dcc.Graph(\n id='graph3',\n figure=gender_rea\n ),\n ], className='four columns'),\n\n html.Div([\n dcc.Graph(\n id='graph15',\n figure=gender_dec\n ),\n ], className='four columns'),\n\n ], className=\"row\"),\n\n ], style={\"padding\": \"3%\", \"border\": \"solid\"}),\n\n\n #Finally, we display the map of France with the graphs per region at the right side.\n\n html.Div([\n html.H2('Current COVID-19 situation in France', style={\"color\": \"white\", \"textAlign\": \"center\"}),\n ], className=\"row\", style={\"backgroundColor\": \"#313131\", \"padding\": \"3%\"}),\n\n html.Div([\n\n html.Div([\n dcc.Graph(\n 
id='graph8',\n figure=france_map),\n ], className=\"seven columns\", style={\"height\": \"100%\"}),\n\n html.Div([\n\n html.Div([\n dcc.Graph(\n id='graph861',\n figure=fig_region),\n ], style={\"height\": \"100%\"}),\n\n html.Div([\n dcc.Graph(\n id='graph871',\n figure=fig_region_rea),\n ], style={\"height\": \"100%\"}),\n\n ], className=\"five columns\", style={\"backgroundColor\": \"#313131\", \"padding\": \"3%\"}),\n\n ], className=\"row\", style={\"padding\": \"3%\", \"border\": \"solid\"}),\n\n], style={\"padding\": \"3%\"})\n\n\[email protected](\n Output('first_graph', 'figure'),\n [Input(\"yaxis-column\", \"value\")]\n)\ndef update_figure(yaxis_column_name):\n return px.scatter(\n df_firstgraph, x=df_firstgraph['day'], y=df_firstgraph[yaxis_column_name], color='department',\n render_mode=\"webgl\", title=\"Evolution of the COVID-19 situation in France\"\n )\n\n#We run the app in the main.py file\n# if __name__ == '__main__':\n# app.run_server(debug=False)\n" } ]
5
pawlaczyk/django_graphql
https://github.com/pawlaczyk/django_graphql
cf85678aa5d510f26007f37bb4a6faf48f05bd86
a097ec196630dc15f5c34e646840cd03f29028ba
ecf793279d90d7e316b1bc0243f7a36b7ba94530
refs/heads/master
2023-01-06T11:34:56.049440
2020-10-29T00:04:33
2020-10-29T00:04:33
307,100,661
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6544103026390076, "alphanum_fraction": 0.6562185883522034, "avg_line_length": 28.9819278717041, "blob_id": "76e497cca4c4b7bc2b7f22767a7045e21f36fd04", "content_id": "0531d09e9b92067b09c201f912668c73d31e8897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4990, "license_type": "no_license", "max_line_length": 133, "num_lines": 166, "path": "/movies/api/schema.py", "repo_name": "pawlaczyk/django_graphql", "src_encoding": "UTF-8", "text": "import graphene\nfrom graphene import relay\nfrom graphene_django.types import DjangoObjectType\nimport graphql_jwt\nfrom graphql_jwt.decorators import login_required\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphql_relay import from_global_id #do mutacji relay\nfrom .models import Director, Movie\n\n\nclass MovieType(DjangoObjectType): # podobnie do serializer\n class Meta:\n model = Movie\n\n #synamicznie dodane do modelu niekoniecznei z bazy\n movie_age = graphene.String()\n\n def resolve_movie_age(self, info):\n return \"Old movie\" if self.year < 2000 else \"New movie\"\n\n\nclass DirectorType(DjangoObjectType): # podobnie do serializer\n class Meta:\n model = Director\n\n\n# just for relay implementation\nclass MovieNode(DjangoObjectType):\n class Meta:\n model = Movie\n #dodanie filtrรณ dla pรณl, custom way to get datd\n #Filtr icontains - incase sensitive\n filter_fields = {\n 'title': [\"exact\", \"icontains\", \"istartswith\"],\n 'year': [\"exact\",]\n }\n interfaces = (relay.Node, ) #this django object witl be relay\n\n\n\nclass Query(graphene.ObjectType):\n # all_movies = graphene.List(MovieType)\n all_movies =DjangoFilterConnectionField(MovieNode)\n #movie = graphene.Field(MovieType, id=graphene.Int(), title=graphene.String()) #aceptujemy parametr id typu Integer, tytul String\n movie = relay.Node.Field(MovieNode)\n\n all_directors = graphene.List(DirectorType)\n\n \"\"\"\n @login_required\n def 
resolve_all_movies(self, info, **kwargs): # dodatkowe argumenty na pozniej\n # 1. roziwazanie info.context\n # user = info.context.user\n # if not user.is_authenticated:\n # raise Exception(\"Auth credentials were not provided\")\n return Movie.objects.all()\n \"\"\"\n\n def resolve_all_directors(self, info, **kwargs): # dodatkowe argumenty na pozniej\n return Director.objects.all()\n\n \"\"\"\n # zastapoione przez movie = relay.Node.Field(MovieNode)\n def resolve_movie(self, info, **kwargs): # dodatkowe argumenty na pozniej moลผna id, title zamiast **kwargs\n movie_id = kwargs.get('id')\n title = kwargs.get('title')\n\n if movie_id is not None:\n return Movie.objects.get(pk=movie_id)\n\n if title is not None:\n return Movie.objects.get(title=title)\n\n return None\n \"\"\"\n\nclass MovieCreateMutation(graphene.Mutation):\n class Arguments:\n title = graphene.String(required=True)\n year = graphene.Int(required=True)\n\n movie = graphene.Field(MovieType)\n\n def mutate(self, info, title, year): #zamiast **kwargs\n #nasza logika\n movie = Movie.objects.create(title=title, year=year)\n if title is not None:\n movie.title = title\n if year is not None:\n movie.year = year\n movie.save()\n\n return MovieUpdateMutation(movie=movie)\n\n\nclass MovieUpdateMutation(graphene.Mutation):\n class Arguments:\n title = graphene.String()\n year = graphene.Int()\n id = graphene.ID(required=True)\n\n movie = graphene.Field(MovieType)\n\n def mutate(self, info, id, title, year):\n #try catch dopisaฤ‡\n movie = Movie.objects.get(pk=id)\n\n return MovieCreateMutation(movie=movie)\n\n\n#relay implementation\nclass MovieUpdateMutationRelay(relay.ClientIDMutation):\n \"\"\"\n :example:\n mutation MutateRelay {\n updateMovieRelay(input: {id: \"TW92aWVOb2RlOjE=\", title:\"Test relay mutation\"})\n {\n movie{\n id\n title\n year\n }\n }\n }\n \"\"\"\n class Input:\n title = graphene.String()\n id = graphene.ID(required=True)\n\n movie = graphene.Field(MovieType)\n\n @classmethod #relay - 
trzbea nadpisac roznice\n def mutate_and_get_payload(cls, root, info, id, title):\n #try catch dopisaฤ‡\n #id dla relay jest inne - jest to long string characters\n movie = Movie.objects.get(pk=from_global_id(id)[1]) #jedyna rรณลผnica w wcyiagnaiu id z nodรณw relay\n\n if title is not None:\n movie.title = title\n movie.save()\n\n return MovieUpdateMutationRelay(movie=movie)\n\n\nclass MovieDeleteMutation(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n\n movie = graphene.Field(MovieType)\n\n def mutate(self, info, id):\n movie = Movie.objects.get(pk=id)\n movie.delete()\n\n return MovieUpdateMutation(movie=None)#juลผ nie ma movie bo usuniฤ™te\n\n\nclass Mutation:\n #gล‚owna klasa mutacji\n token_auth = graphql_jwt.ObtainJSONWebToken.Field() #token z loginu i hasล‚a uลผytkownika\n verify_token = graphql_jwt.Verify.Field()\n\n create_movie = MovieCreateMutation.Field() #dla jednego pecyficznego rekordu\n update_movie = MovieUpdateMutation.Field()\n update_movie_relay = MovieUpdateMutationRelay.Field() #dal relay\n delete_movie = MovieDeleteMutation.Field()\n" }, { "alpha_fraction": 0.7868338823318481, "alphanum_fraction": 0.7868338823318481, "avg_line_length": 25.625, "blob_id": "170780ffdae6a9224c73a3b09e15e024863d6116", "content_id": "073fcc3a855ad64f015a173ea0ff34cda77364a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 96, "num_lines": 24, "path": "/movies/schema.py", "repo_name": "pawlaczyk/django_graphql", "src_encoding": "UTF-8", "text": "import graphene\nimport movies.api.schema\n\n\nclass Query(movies.api.schema.Query, graphene.ObjectType): #dodawac jako arg z innych apek\n pass\n\n\nclass Mutation(movies.api.schema.Mutation, graphene.ObjectType): #dodawac jako arg z innych apek\n pass\n\n#dla frontu\n\"\"\"\npython manage.py graphql_schema\nSuccessfully dumped GraphQL schema to schema.json\nrobi siฤ™ nowy plik ma np 
\"name\": \"allMovies\",\nbฤ™dize trzeb aciฤ…gle generowฤ‡ nowe schema.json po zmianach w backendzie\nUลผywaฤ‡ jak siฤ™ nie ma default w settings.py\npython manage.py graphql_schema --schema paramtrodomojejchemca\n\n\"\"\"\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)" }, { "alpha_fraction": 0.7540603280067444, "alphanum_fraction": 0.7540603280067444, "avg_line_length": 38.181819915771484, "blob_id": "ff51f925ffbb62b384926b0b601ebf8957f7e347", "content_id": "bd1e6b70e2827ea4478ec373ee6f93fbfc1280a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 112, "num_lines": 11, "path": "/movies/urls.py", "repo_name": "pawlaczyk/django_graphql", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom graphene_django.views import GraphQLView\nfrom graphql_jwt.decorators import jwt_cookie\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # url dla graphql\n # path('graphql/', GraphQLView.as_view(schema)) jak bez general schema w settings\n path('graphql/', jwt_cookie(GraphQLView.as_view(graphiql=True))) #opakowanie ciastkiem jwt wszystkie te urle\n]\n" }, { "alpha_fraction": 0.8188976645469666, "alphanum_fraction": 0.8188976645469666, "avg_line_length": 24.399999618530273, "blob_id": "f8128057aac37caea9f916e6402bd7b4da212dee", "content_id": "e1c5f09c85ea4137c8c33dc94f90b4c455758171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/movies/api/admin.py", "repo_name": "pawlaczyk/django_graphql", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Director, Movie\n\nadmin.site.register(Movie)\nadmin.site.register(Director)\n" } ]
4
nlgordon/api-poc
https://github.com/nlgordon/api-poc
f45f9ec29e609f9662cab595c74b9f5617884215
eeab42c2a37bdc0fc7a5f835a1c01326ba190b25
fa31c2a9d06069fb997795621bd2b0d63a40bcf4
refs/heads/master
2020-03-25T00:19:01.488715
2018-08-01T16:17:04
2019-03-20T18:43:15
143,179,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.7416666746139526, "avg_line_length": 39.33333206176758, "blob_id": "4efdd2684a3e618a93b2ae7d6f873159dd8a79e2", "content_id": "c9dc1af5c56705b457079a59cf068ecc995848fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "no_license", "max_line_length": 99, "num_lines": 3, "path": "/scripts/run_postgres.sh", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ndocker run --name api-poc-postgres --rm -e POSTGRES_PASSWORD=pillar -p 5432:5432/tcp postgres" }, { "alpha_fraction": 0.8666666746139526, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 6.5, "blob_id": "b47be20198bed252d749d3b0c8c5c4c30d0d49f9", "content_id": "6c9ee71d8dd96b435c04f0132d662570529b7493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 30, "license_type": "no_license", "max_line_length": 10, "num_lines": 4, "path": "/requirements.txt", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "sanic\nPillow\naiopg\nsqlalchemy\n" }, { "alpha_fraction": 0.5438596606254578, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 18.33333396911621, "blob_id": "7c8b010709a5e5483a00a36a369b7e4c2a5efa3b", "content_id": "5c61a542b3e6a93501ec1271fb57e92264095db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "no_license", "max_line_length": 36, "num_lines": 3, "path": "/scripts/docker_run.sh", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ndocker run --rm -p 8002:8001 api-poc" }, { "alpha_fraction": 0.5622748136520386, "alphanum_fraction": 0.5787442326545715, "avg_line_length": 30.593496322631836, "blob_id": "667ab223a1e31dfaec1ee91631de1e67956dae35", "content_id": "cf2bab0aa4ef6b48197afd3fdadab0fc234d0638", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3886, "license_type": "no_license", "max_line_length": 141, "num_lines": 123, "path": "/api_poc/app.py", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "from asyncio import sleep\nfrom datetime import datetime\nfrom io import BytesIO\nimport random\n\nimport os\n\nimport multiprocessing\n\nimport sqlalchemy\nfrom PIL import Image\nfrom aiopg.sa import create_engine\nfrom sanic import Sanic, response\nfrom sanic.response import json\n\napp = Sanic()\n\nmetadata = sqlalchemy.MetaData()\n\npolls = sqlalchemy.Table('sanic_polls', metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('question', sqlalchemy.String(50)),\n sqlalchemy.Column(\"pub_date\", sqlalchemy.DateTime))\n\n\[email protected]('before_server_start')\nasync def get_engine(app, loop):\n connection = get_connection_string()\n app.engine = await create_engine(connection, maxsize=1000)\n async with app.engine.acquire() as conn:\n ret = []\n async for row in conn.execute(\"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'postgres' and tablename = 'sanic_polls')\"):\n ret.append(row)\n\n if not ret:\n await conn.execute(\"DROP TABLE IF EXISTS sanic_polls\")\n await conn.execute(\"\"\"CREATE TABLE sanic_polls (\n id serial primary key,\n question varchar(50),\n pub_date timestamp\n );\"\"\")\n for i in range(0, 100):\n await conn.execute(\n polls.insert().values(question=i,\n pub_date=datetime.now())\n )\n\n\[email protected]('after_server_stop')\nasync def close_db(app, loop):\n app.engine.close()\n await app.engine.wait_closed()\n\n\ndef get_connection_string():\n return 'postgres://{0}:{1}@{2}/{3}'.format(os.environ.get('RDS_USERNAME', 'postgres'),\n os.environ.get('RDS_PASSWORD', 'password'),\n os.environ.get('RDS_HOSTNAME', 'localhost'),\n os.environ.get('RDS_DB_NAME', 'postgres'))\n\n\[email protected](\"/hello\")\nasync def hello(request):\n return 
json({\"hello\": \"world\"})\n\n\[email protected]('/sleepy')\nasync def sleepy(request):\n sleep_time = random.randint(0, 2000)\n await sleep(sleep_time / 1000)\n return json({\"sleepy\": str(sleep_time)[0:5]})\n\n\[email protected]('/sleepy-fixed')\nasync def sleepy(request):\n sleep_time = int(request.args['sleep'][0])\n await sleep(sleep_time / 1000)\n return json({\"sleepy\": str(sleep_time)[0:5]})\n\n\[email protected](\"/image\")\nasync def gen_image(request):\n start = datetime.now()\n encoded_png = generate_image()\n end = datetime.now()\n duration = (end - start).microseconds / 1000\n print(\"Image generation time: {}\".format(int(duration)))\n return response.raw(encoded_png, headers={'Content-Type': 'image/png'})\n\n\[email protected](\"/database\")\nasync def query_db(request):\n async with app.engine.acquire() as conn:\n result = []\n async for row in conn.execute(polls.select()):\n result.append({\"question\": row.question, \"pub_date\": row.pub_date})\n return json({\"polls\": result})\n\n\ndef generate_image():\n image = Image.new('RGBA', (256, 256))\n for i in range(0, 100):\n image.putpixel((rand_value(), rand_value()), (rand_value(), rand_value(), rand_value(), rand_value()))\n bytes_io = BytesIO()\n image.save(bytes_io, 'PNG')\n encoded_png = bytes_io.getvalue()\n return encoded_png\n\n\ndef rand_value():\n return random.randint(0, 255)\n\n\nif __name__ == '__main__':\n port = os.environ.get('SERVING_PORT', 8001)\n cores = multiprocessing.cpu_count()\n if not cores:\n cores = 1\n\n # cores = 1\n\n workers = os.environ.get('API_WORKERS', cores * 2)\n app.run(host='0.0.0.0', port=port, workers=workers)\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 16.33333396911621, "blob_id": "03cb83b69b2e67fe16923b3d64512fdfbad4dd1d", "content_id": "98c08d1693f4586f8ab217e367bcbf77c2647f9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 51, 
"license_type": "no_license", "max_line_length": 30, "num_lines": 3, "path": "/scripts/package.sh", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nzip ~/Desktop/api-poc.zip -r *" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.6739130616188049, "avg_line_length": 14.666666984558105, "blob_id": "1033f0f011ac1d2840cbdb2c80c48c55706bb4ef", "content_id": "d84a2cc116626a901532d27aa20b4c50e93b57b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 46, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/scripts/docker_build.sh", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ndocker build -t api-poc ." }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 18.399999618530273, "blob_id": "0f05b65f7c5fd4dcccfd6df14eef95bfc3a7791b", "content_id": "c09b5ab5f969a3463295f7023fe3a400c466e24f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 96, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/scripts/docker_publish.sh", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ndocker tag api-poc ngordonpillar/api-poc\n\ndocker push ngordonpillar/api-poc" }, { "alpha_fraction": 0.6824324131011963, "alphanum_fraction": 0.7229729890823364, "avg_line_length": 17.5, "blob_id": "7a218ec9eab541d8259d5e2e6d617a3785903b53", "content_id": "4b5c7430c746ebcfb1471f0a51d6dda9d70b757b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 148, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/Dockerfile", "repo_name": "nlgordon/api-poc", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nCOPY requirements.txt /\nRUN pip install -Ur /requirements.txt\nCOPY 
api_poc /api_poc/\nEXPOSE 8001\n\nCMD [\"python\", \"api_poc/app.py\"]\n" } ]
8
gissehel/serie
https://github.com/gissehel/serie
034f6d8639b7f5a6f8811587197eaec6a915b1f5
b7fea395e86e324196c83eacfd3e99862c3fd70b
2b32e4533df2304f3549bcaa589fd906a5cfb3f1
refs/heads/master
2020-03-19T00:02:29.107873
2019-08-30T02:44:33
2019-08-30T02:44:33
135,448,928
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3528654873371124, "alphanum_fraction": 0.4954386055469513, "avg_line_length": 42.395938873291016, "blob_id": "d6d52adcca0aa5e335c0f2d3fffc2f24e581d899", "content_id": "a1b2f00be83a04ac89b25e8593c50ed393bdcae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17100, "license_type": "no_license", "max_line_length": 488, "num_lines": 394, "path": "/serietest.py", "repo_name": "gissehel/serie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport unittest\nfrom serie import Serie\n\nclass ConsoleExporterMock(object) :\n def __init__(self) :\n self._outs = []\n self._errs = []\n\n def errs(self) :\n return self._errs\n\n def outs(self) :\n return self._outs\n\n def out(self, text) :\n # sys.stdout.write(text+'\\n')\n self._outs.append(text)\n\n def err(self, text) :\n sys.stderr.write(text+'\\n')\n self._errs.append(text)\n\n def debug(self, text) :\n self.stdout.write(text+'\\n')\n\n def clear_out(self) :\n self._outs.clear()\n\nclass DirMock(object) :\n def __init__(self, dirname) :\n self._dirname = dirname\n self._z = set()\n self._nz = set()\n class FileClass(object) :\n def __init__(self, filename, z, nz) :\n self._buffer = ''\n self._z = z\n self._nz = nz\n self._filename = filename\n def __enter__(self):\n self._buffer = ''\n def write(self, data) :\n self._buffer += data\n def __exit__(self, *args, **kwargs) :\n if len(self._buffer) == 0 :\n self._z.add(self._filename)\n else :\n self._nz.add(self._filename)\n self._fileclass = FileClass\n def listdir(self,dirname) :\n # print \"dirname: [%s][%s]\" % (self._dirname, dirname)\n return sorted(list(self._z) + list(self._nz))\n def filesize(self, filename) :\n if filename in self._z :\n return 0\n if filename in self._nz :\n return 1\n raise IOError()\n def touch(self, filename) :\n if (filename not in self._z) and (filename not in self._nz) :\n self._z.add(filename)\n def unlink(self, filename) :\n 
if filename in self._z :\n self._z.remove(filename)\n elif filename in self._nz :\n self._nz.remove(filename)\n else :\n raise IOError()\n def remove_exec(self, filename):\n pass\n def open(self, filename) :\n return self._fileclass(filename, self._z, self._nz)\n def fileexists(self, filename) : \n return (filename in self._z) or (filename in self._nz)\n def mkdir(self, dirname) :\n pass\n def apply(self, method_name, filename):\n # print \"apply: [%s][%s][%s]\" % (self._dirname, method_name, filename)\n method = getattr(self, method_name)\n return method(filename)\n\nclass SerieOsMock(object) :\n def __init__(self) :\n self._dirs = {}\n def _get_dirmock_filename(self, global_filename) :\n basename = os.path.basename(global_filename)\n dirname = os.path.dirname(global_filename)\n if dirname == '.' :\n dirname = ''\n if dirname not in self._dirs :\n self._dirs[dirname] = DirMock(dirname)\n return self._dirs[dirname], basename\n def apply(self, method_name, filename):\n dirmock, basename = self._get_dirmock_filename(filename)\n return dirmock.apply(method_name, basename)\n def listdir(self,dirname) :\n return self.apply('listdir', os.path.join(dirname, '.'))\n def filesize(self, filename) :\n return self.apply('filesize', filename)\n def touch(self, filename) :\n return self.apply('touch', filename)\n def unlink(self, filename) :\n return self.apply('unlink', filename)\n def remove_exec(self, filename):\n return self.apply('remove_exec', filename)\n def open(self, filename) :\n return self.apply('open', filename)\n def fileexists(self, filename) :\n return self.apply('fileexists', filename)\n def mkdir(self, dirname) :\n return self.apply('mkdir', dirname)\n\nclass TestSerie(unittest.TestCase) :\n def setUp(self) :\n self.maxDiff = None\n self._serieos = SerieOsMock()\n self._console = ConsoleExporterMock()\n\n def tearDown(self) :\n self.assertEqual(self._console.errs(),[])\n\n def assert_files(self, filenames, subdir=None) :\n if subdir is None :\n listdir = 
self._serieos.listdir('.')\n else :\n listdir = self._serieos.listdir(subdir)\n self.assertEqual(sorted(listdir),sorted(filenames))\n\n def assert_out(self, lines) :\n self.assertEqual(self._console.outs(), lines)\n\n def main(self,*args,**kwargs) :\n self._serie = Serie(self._serieos, self._console)\n self._serie.main(*args,**kwargs)\n\n def touch(self,filenames) :\n for filename in filenames :\n self._serieos.touch(filename)\n\n def test_old_basic(self) :\n self.touch(['-1-'])\n self.main('7')\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n def test_old_end(self) :\n self.touch(['-1-'])\n self.main('7')\n self.main('e9')\n self.assert_files(['-1--2--3--4--5--6-[7]-8--9-+'])\n def test_old_size_0_00(self) :\n self.touch(['-1-'])\n self.main('7')\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n self.main('11')\n self.assert_files(['-01--02--03--04--05--06-[07]-08--09--10-[11]'])\n def test_old_size_sep_20(self) :\n self.touch(['-1-'])\n self.main('7')\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n self.main('11')\n self.assert_files(['-01--02--03--04--05--06-[07]-08--09--10-[11]'])\n self.main('31')\n self.assert_files(['-01--02--03--04--05--06-[07]-08--09--10-[11]-12--13--14--15--16--17--18--19--20-','-21--22--23--24--25--26--27--28--29--30-[31]'],)\n def test_old_empty_row(self) :\n self.touch(['-1-'])\n self.main('7','51')\n self.assert_files(['-01--02--03--04--05--06-[07]-08--09--10--11--12--13--14--15--16--17--18--19--20-','-41--42--43--44--45--46--47--48--49--50-[51]'],)\n\n def test_new_basic(self) :\n self.main('7')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n def test_new_end(self) :\n self.main('7')\n self.main('e9')\n self.assert_files(['@_-1--2--3--4--5--6-[7]-8--9-+'])\n def test_new_size_0_00(self) :\n self.main('7')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n self.main('11')\n self.assert_files(['@_-01--02--03--04--05--06-[07]-08--09--10-[11]'])\n def test_new_size_sep_20(self) :\n self.main('7')\n 
self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n self.main('11')\n self.assert_files(['@_-01--02--03--04--05--06-[07]-08--09--10-[11]'])\n self.main('31')\n self.assert_files(['@_-01--02--03--04--05--06-[07]-08--09--10-[11]-12--13--14--15--16--17--18--19--20-','@_-21--22--23--24--25--26--27--28--29--30-[31]'],)\n def test_new_empty_row(self) :\n self.main('7','51')\n self.assert_files(['@_-01--02--03--04--05--06-[07]-08--09--10--11--12--13--14--15--16--17--18--19--20-','@_-41--42--43--44--45--46--47--48--49--50-[51]'],)\n\n def test_newsyntax_migration(self):\n self.touch(['-1-'])\n self.main('7')\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n self.main('m')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n\n def test_newsyntax_migration_2(self):\n self.touch(['-1-'])\n self.main('7')\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n self.main('migration')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n\n def test_namespace_basic(self):\n self.main('name:7')\n self.assert_files(['@_name_-1--2--3--4--5--6-[7]'])\n\n def test_namespace_numeral(self):\n self.main('name3:7')\n self.assert_files(['@_name03_-1--2--3--4--5--6-[7]'])\n\n def test_mix_namespaces(self):\n self.main('name3:7','name001:5')\n self.assert_files(['@_name01_-1--2--3--4-[5]','@_name03_-1--2--3--4--5--6-[7]'])\n self.main('name3:2-4','name1:s3-6','23')\n self.assert_files(['@_-21--22-[23]','@_name01_-1--2-!3!!4!$5$!6!','@_name03_-1-[2][3][4]-5--6-[7]'])\n\n def test_mix_namespaces_with_comma(self):\n self.main('name3:7','name001:5')\n self.assert_files(['@_name01_-1--2--3--4-[5]','@_name03_-1--2--3--4--5--6-[7]'])\n self.main('name3:2,4','name1:s3,6','23')\n self.assert_files(['@_-21--22-[23]','@_name01_-1--2-!3!-4-[5]!6!','@_name03_-1-[2]-3-[4]-5--6-[7]'])\n\n def test_end2(self) :\n self.main('7')\n self.main('-9')\n self.assert_files(['@_-1--2--3--4--5--6-[7]-8--9-'])\n def test_interval_creation(self) :\n self.main('3-7')\n self.assert_files(['@_-1--2-[3][4][5][6][7]'])\n def 
test_interval_use(self) :\n self.main('9')\n self.assert_files(['@_-1--2--3--4--5--6--7--8-[9]'])\n self.main('3-7')\n self.assert_files(['@_-1--2-[3][4][5][6][7]-8-[9]'])\n def test_interval_overlap(self) :\n self.main('9')\n self.assert_files(['@_-1--2--3--4--5--6--7--8-[9]'])\n self.main('2-5','4-7')\n self.assert_files(['@_-1-[2][3][4][5][6][7]-8-[9]'])\n def test_consolidate(self) :\n self.touch(['[7]'])\n self.assert_files(['[7]'])\n self.main()\n self.assert_files(['-1--2--3--4--5--6-[7]'])\n def test_consolidate_multiple(self) :\n self.touch(['[7]','[14][9]'])\n self.assert_files(['[7]','[14][9]'])\n self.main()\n self.assert_files(['-01--02--03--04--05--06-[07]-08-[09]-10--11--12--13-[14]'])\n def test_consolidate_multiple_overlap(self) :\n self.touch(['[7][8][9]','[08][09][10][11]'])\n self.main()\n self.assert_files(['-01--02--03--04--05--06-[07][08][09][10][11]'])\n def test_consolidate_multiple_overlap_oldandnewsyntax(self) :\n self.touch(['[7][8][9]','@_[08][09][10][11]'])\n self.main()\n self.assert_files(['@_-01--02--03--04--05--06-[07][08][09][10][11]'])\n def test_suppression_basic(self) :\n self.main('9')\n self.assert_files(['@_-1--2--3--4--5--6--7--8-[9]'])\n self.main('7')\n self.assert_files(['@_-1--2--3--4--5--6-[7]-8-[9]'])\n self.main('-7')\n self.assert_files(['@_-1--2--3--4--5--6--7--8-[9]'])\n def test_suppression_end_empty(self) :\n self.main('e7','-7')\n self.assert_files(['@_-1--2--3--4--5--6--7-+'])\n def test_seen_basic(self) :\n self.main('7')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n self.main('4')\n self.assert_files(['@_-1--2--3-[4]-5--6-[7]'])\n self.main('s4')\n self.assert_files(['@_-1--2--3-$4$-5--6-[7]'])\n def test_seen_interval(self) :\n self.main('7')\n self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n self.main('1-5')\n self.assert_files(['@_[1][2][3][4][5]-6-[7]'])\n self.main('s2-4')\n self.assert_files(['@_[1]$2$$3$$4$[5]-6-[7]'])\n def test_seen_interval2(self) :\n self.main('7')\n 
self.assert_files(['@_-1--2--3--4--5--6-[7]'])\n self.main('1-4')\n self.assert_files(['@_[1][2][3][4]-5--6-[7]'])\n self.main('s2-5')\n self.assert_files(['@_[1]$2$$3$$4$!5!-6-[7]'])\n def test_comma(self) :\n self.main('1,4,7')\n self.assert_files(['@_[1]-2--3-[4]-5--6-[7]'])\n def test_comma_interval_1(self) :\n self.main('1,4-6,8')\n self.assert_files(['@_[1]-2--3-[4][5][6]-7-[8]'])\n def test_comma_interval_2(self) :\n self.main('113-124,139-145,152-155')\n self.assert_files(['@_-101--102--103--104--105--106--107--108--109--110--111--112-[113][114][115][116][117][118][119][120]','@_[121][122][123][124]-125--126--127--128--129--130--131--132--133--134--135--136--137--138-[139][140]','@_[141][142][143][144][145]-146--147--148--149--150--151-[152][153][154][155]'])\n def test_comma_interval_multiple(self) :\n self.main('1,4-6,8','s3-4,6-8')\n self.assert_files(['@_[1]-2-!3!$4$[5]$6$!7!$8$'])\n def test_consolidate_multiple_end(self) :\n self.touch(['[7]','[14][9]','-17-'])\n self.assert_files(['[7]','[14][9]','-17-'])\n self.main()\n self.assert_files(['-01--02--03--04--05--06-[07]-08-[09]-10--11--12--13-[14]-15--16--17-'])\n def test_consolidate_multiple_end_2(self) :\n self.touch(['[7]','@_[14][9]','-22-'])\n self.assert_files(['[7]','@_[14][9]','-22-'])\n self.main()\n self.assert_files(['@_-01--02--03--04--05--06-[07]-08-[09]-10--11--12--13-[14]-15--16--17--18--19--20-','@_-21--22-'])\n def test_suppression_empty(self) :\n self.main('-7')\n self.assert_files(['@_-1--2--3--4--5--6--7-'])\n\n def test_subdir_basic(self):\n self.main('s1:7')\n self.assert_files(['@_s01_-1--2--3--4--5--6-[7]'])\n self.main('s1~SUB01')\n self.assert_files(['@_s01~SUB01'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n\n def test_subdir_basic_other_order(self):\n self.main('s1~SUB01')\n self.assert_files(['@_s01~SUB01'])\n self.main('s1:7')\n self.assert_files(['@_s01~SUB01'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n\n def 
test_subdir_multiple(self):\n self.main('s1:7','5')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01_-1--2--3--4--5--6-[7]'])\n self.main('s1~SUB01')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01~SUB01'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n self.main('s002~SUB002','s2:e10','s3:4')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01~SUB01','@_s02~SUB002','@_s03_-1--2--3-[4]'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n self.assert_files(['@_-01--02--03--04--05--06--07--08--09--10-+'],subdir='SUB002')\n\n def test_flatten(self):\n self.main('s1:7','5','s1~SUB01','s002~SUB002','s2:e10','s3:4')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01~SUB01','@_s02~SUB002','@_s03_-1--2--3-[4]'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n self.assert_files(['@_-01--02--03--04--05--06--07--08--09--10-+'],subdir='SUB002')\n self.main('flatten')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01_-1--2--3--4--5--6-[7]','@_s02_-01--02--03--04--05--06--07--08--09--10-+','@_s03_-1--2--3-[4]'])\n self.assert_files([],subdir='SUB01')\n self.assert_files([],subdir='SUB002')\n\n def test_flatten_f(self):\n self.main('s1:7','5','s1~SUB01','s002~SUB002','s2:e10','s3:4')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01~SUB01','@_s02~SUB002','@_s03_-1--2--3-[4]'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n self.assert_files(['@_-01--02--03--04--05--06--07--08--09--10-+'],subdir='SUB002')\n self.main('f')\n self.assert_files(['@_-1--2--3--4-[5]','@_s01_-1--2--3--4--5--6-[7]','@_s02_-01--02--03--04--05--06--07--08--09--10-+','@_s03_-1--2--3-[4]'])\n self.assert_files([],subdir='SUB01')\n self.assert_files([],subdir='SUB002')\n\n def test_text(self):\n self.main('s1:7','5','s1~SUB01','s002~SUB002','s2:e10','s3:4','s3:113-124,139-145,152-155','s3:s123-126')\n 
self.assert_files(['@_-1--2--3--4-[5]','@_s01~SUB01','@_s02~SUB002','@_s03_-001--002--003-[004]-005--006--007--008--009--010--011--012--013--014--015--016--017--018--019--020-','@_s03_-101--102--103--104--105--106--107--108--109--110--111--112-[113][114][115][116][117][118][119][120]','@_s03_[121][122]$123$$124$!125!!126!-127--128--129--130--131--132--133--134--135--136--137--138-[139][140]','@_s03_[141][142][143][144][145]-146--147--148--149--150--151-[152][153][154][155]'])\n self.assert_files(['@_-1--2--3--4--5--6-[7]'],subdir='SUB01')\n self.assert_files(['@_-01--02--03--04--05--06--07--08--09--10-+'],subdir='SUB002')\n self.main('text')\n self.assert_out([\n ' 1 2 3 4 [5]',\n '',\n 's01 (SUB01):',\n ' 1 2 3 4 5 6 [7]',\n '',\n 's02 (SUB002):',\n ' 01 02 03 04 05 06 07 08 09 10 ++',\n '',\n 's03:',\n ' 001 002 003 [004] 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020 ',\n ' 021 022 023 024 025 026 027 028 029 030 031 032 033 034 035 036 037 038 039 040 ',\n ' 041 042 043 044 045 046 047 048 049 050 051 052 053 054 055 056 057 058 059 060 ',\n ' 061 062 063 064 065 066 067 068 069 070 071 072 073 074 075 076 077 078 079 080 ',\n ' 081 082 083 084 085 086 087 088 089 090 091 092 093 094 095 096 097 098 099 100 ',\n ' 101 102 103 104 105 106 107 108 109 110 111 112 [113] [114] [115] [116] [117] [118] [119] [120]',\n '[121] [122] $123$ $124$ !125! !126! 
127 128 129 130 131 132 133 134 135 136 137 138 [139] [140]',\n '[141] [142] [143] [144] [145] 146 147 148 149 150 151 [152] [153] [154] [155]',\n '',\n ])\n\nif __name__ == '__main__' :\n unittest.main()\n\n\n" }, { "alpha_fraction": 0.4711934030056, "alphanum_fraction": 0.48148149251937866, "avg_line_length": 16.80769157409668, "blob_id": "ab01b967b95bfc8ac7334bf41e12bb741c49e7f3", "content_id": "f2740106d0fce7bc746b3f5acc5e2922807c1c69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 45, "num_lines": 26, "path": "/setup.py", "repo_name": "gissehel/serie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom distutils.core import setup\r\nimport py2exe\r\nimport sys\r\nimport os\r\nimport yaml\r\n\r\nDIST_PATH = r'bin'\r\n\r\nsys.argv = [sys.argv[0],]\r\nsys.argv += ['py2exe','--dist-dir',DIST_PATH]\r\n\r\nsetup(\r\n #console=['serie.py'],\r\n windows=['serie.py'],\r\n options={\r\n 'py2exe' : {\n 'bundle_files':1,\n 'compressed':True,\r\n 'includes' : [\r\n # 'zope.interface',\r\n # 'yaml',\r\n ]\r\n },\r\n },\r\n )\r\n" }, { "alpha_fraction": 0.4655507504940033, "alphanum_fraction": 0.47137945890426636, "avg_line_length": 43.903804779052734, "blob_id": "77aa4def2368e0db6a2587019de3c2d10d86c27b", "content_id": "b267ef40efeece09ae572c6bfde05a0e23b1018a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20073, "license_type": "no_license", "max_line_length": 576, "num_lines": 447, "path": "/serie.py", "repo_name": "gissehel/serie", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport re\nimport sys\nimport stat\n\nclass SerieState(object) :\n NONE = 0\n GOT = 1\n SEEN = 2\n GOTSEEN = 3\n\nclass ConsoleExporter(object) :\n def out(self, text) :\n sys.stdout.write(text+'\\n')\n\n def err(self, text) :\n sys.stderr.write(text+'\\n')\n\n def debug(self, text) :\n 
sys.stdout.write(text+'\\n')\n\nclass SerieOs(object) :\n def listdir(self, dirname) :\n return os.listdir(dirname)\n def filesize(self, filename) :\n return os.lstat(filename).st_size\n def touch(self, filename) :\n #basedir = os.path.basedir(filename)\n #if basedir != '' and not os.exists(basedir) :\n # os.makedirs(dirname, 0777)\n handle = open(filename,'wb')\n handle.close()\n self.remove_exec(filename)\n def unlink(self, filename) :\n os.unlink(filename)\n def remove_exec(self, filename) :\n S_IX=(stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)\n mode = stat.S_IMODE(os.lstat(filename).st_mode)\n if (mode & S_IX != 0) :\n os.chmod(filename, mode &~S_IX)\n def open(self, filename) :\n return open(filename, 'wb')\n def fileexists(self, filename) :\n return os.path.exists(filename)\n def mkdir(self, dirname) :\n if not os.path.exists(dirname) :\n os.makedirs(dirname, 0777)\n\nclass Serie(object) :\n SERIE_FILE_RE = re.compile(r'^(\\@[\\:\\_])?([^\\[\\]\\$\\!\\@\\:\\~0-9][^\\[\\]\\$\\!\\@\\:\\~]*_)?(?:[\\[\\-\\$\\!][0-9]+[\\]\\-\\$\\!])+(?:[\\+\\#])?$')\n SERIE_ITEM_RE = re.compile(r'([\\[\\-\\$\\!])([0-9]+)[\\]\\-\\$\\!]')\n SERIE_FILE_LINK_RE = re.compile(r'^@_([^\\[\\]\\$\\!\\@\\:\\~0-9][^\\[\\]\\$\\!\\@\\:\\~]*)~(.*)$')\n split_at = 20\n\n NUM_RE = re.compile(r'^[0-9]+$')\n NUM_RANGE_RE = re.compile(r'^[0-9]+\\-[0-9]+$')\n STATE_BEGIN_CHARS = '-[!$'\n STATE_END_CHARS = '-]!$'\n NAMESPACE_WITH_NUM_RE = re.compile(r'^(.*?)([0-9]+)$')\n\n def __init__(self, serieos, console) :\n self._serieos = serieos\n self._console = console\n\n self._dir = '.'\n self._namespaces = {}\n self._files = []\n self._write_text = False\n self._write_html = False\n self._new_syntax = True\n self.debug(\"=> Serie.__init__\", None)\n\n def debug(self, name, value) :\n # print \"%s : [%s]\" % (name,value)\n # self._console.out(\"%s : [%s]\" % (name,value))\n pass\n\n def error(self, message) :\n # print message\n self._console.err(message)\n\n def init_namespace(self, namespace) :\n namespace = 
namespace.replace(':','_')\n if namespace.endswith('_') :\n raise Exception('No!')\n namespace_num_match = self.NAMESPACE_WITH_NUM_RE.match(namespace)\n if namespace_num_match is not None:\n prefix, count = namespace_num_match.groups()\n namespace = prefix+\"%02d\" % (int(count),)\n self.debug('namespace',namespace)\n namespace_struct = self._namespaces.setdefault(namespace,{'nums':{},'max':None,'subdir':None})\n return namespace, namespace_struct['nums']\n\n def set_max(self, namespace, value) :\n self._namespaces[namespace]['max'] = value\n\n def get_max(self, namespace) :\n return self._namespaces[namespace]['max']\n\n def set_subdir(self, namespace, value) :\n self._namespaces[namespace]['subdir'] = value\n\n def get_subdir(self, namespace) :\n if namespace not in self._namespaces :\n return None\n return self._namespaces[namespace]['subdir']\n\n def get_namespace_by_subdir(self, subdir) :\n for namespace in self.get_namespaces() :\n if self.get_subdir(namespace) == subdir :\n return namespace\n return None\n\n def scan(self) :\n has_old_syntax = False\n has_new_syntax = False\n\n subdirs_to_parse = [ None ]\n subdirs_parsed = []\n\n while len(subdirs_to_parse) > 0 :\n current_subdir = subdirs_to_parse.pop(0)\n if current_subdir not in subdirs_parsed :\n if current_subdir is None :\n current_dir = '.'\n else :\n current_dir = current_subdir\n current_namespace = self.get_namespace_by_subdir(current_subdir)\n\n for filename in self._serieos.listdir(current_dir) :\n fullfilename = filename\n if current_subdir is not None :\n fullfilename = os.path.join(current_dir, filename)\n size = self._serieos.filesize(fullfilename)\n if size == 0:\n self.debug('scanning file', fullfilename)\n self.debug('current_subdir', current_subdir)\n self.debug('current_namespace', current_namespace)\n\n if (self.SERIE_FILE_RE.match(filename) is not None) :\n namespace = ''\n if filename.startswith('@') :\n has_new_syntax = True\n else :\n has_old_syntax = True\n namespace_parts = 
filename.split('_')\n if len(namespace_parts) >= 3 :\n namespace = '_'.join(namespace_parts[1:-1])\n if current_namespace is not None :\n if namespace != '' :\n self.debug('namespaces',[current_namespace, namespace])\n namespace = '_'.join([current_namespace, namespace])\n else :\n self.debug('namespace alone',current_namespace)\n namespace = current_namespace\n namespace, nums = self.init_namespace(namespace)\n for state, num in self.SERIE_ITEM_RE.findall(namespace_parts[-1]) :\n #print state,num\n state = (self.STATE_BEGIN_CHARS.index(state))\n num = int(num)\n # print num,got\n if num not in nums:\n nums[num] = 0\n nums[num] |= state\n if filename[-1:] in ('+','#') :\n self.set_max(namespace, max(nums.keys()))\n # print \"max:\",self.get_max(namespace)\n self._files.append(fullfilename)\n else :\n match = self.SERIE_FILE_LINK_RE.match(filename)\n if match is not None :\n has_new_syntax = True\n\n namespace, subdir = match.groups()\n subdir = subdir.replace('_', os.sep)\n if current_subdir is not None :\n subdir = os.path.join(current_dir, subdir)\n if current_namespace is not None :\n namespace = '_'.join([current_namespace, namespace])\n namespace, nums = self.init_namespace(namespace)\n self.set_subdir(namespace, subdir)\n if (subdir not in subdirs_to_parse) and (subdir not in subdirs_parsed) :\n subdirs_to_parse.append(subdir)\n self._serieos.mkdir(subdir)\n self._files.append(fullfilename)\n subdirs_parsed.append(current_subdir)\n\n if has_new_syntax :\n self._new_syntax = True\n elif has_old_syntax :\n if any(namespace != '' for namespace in self.get_namespaces()) :\n self._new_syntax = True\n else :\n self._new_syntax = False\n else :\n self._new_syntax = True\n\n\n def get_prefix(self, namespace) :\n subdir = self.get_subdir(namespace)\n if self._new_syntax :\n if subdir is not None :\n prefix = os.path.join(subdir, '@_')\n elif namespace == '':\n prefix = '@_'\n else:\n prefix = '@_'+namespace+'_'\n else :\n prefix = ''\n return prefix\n\n def 
get_namespaces(self) :\n namespaces = self._namespaces.keys()\n return sorted(namespaces)\n\n def write_files(self, max_by_namespace):\n files_to_write = []\n files_not_to_remove = []\n\n for namespace in self.get_namespaces() :\n subdir = self.get_subdir(namespace)\n if subdir is not None :\n if '_' in namespace :\n parent_namespace, base_namespace = namespace.rsplit('_',1)\n else :\n parent_namespace = ''\n base_namespace = namespace\n parent_subdir = self.get_subdir(parent_namespace)\n filename_subdir = subdir\n if parent_subdir is not None :\n if subdir.startswith(parent_subdir):\n filename_subdir = subdir[len(parent_subdir):].strip(os.sep)\n filename_subdir = filename_subdir.replace(os.sep,'_')\n filename = self.get_prefix(parent_namespace)\n filename += base_namespace\n filename += '~'\n filename += filename_subdir\n\n if filename in self._files :\n files_not_to_remove.append(filename)\n else :\n files_to_write.append(filename)\n\n got_all = True\n if max_by_namespace[namespace] is not None :\n digit_count = str(len(str(max_by_namespace[namespace])))\n current_filename = self.get_prefix(namespace)\n all_are_none = True\n for index in xrange(1,max_by_namespace[namespace]+1) :\n # print \"index:\",index\n strnum = ('%0'+digit_count+'d') % (index,)\n state = self._namespaces[namespace]['nums'].get(index,0)\n got_all = got_all and ((state & SerieState.GOT) != 0)\n if state != SerieState.NONE or index == max_by_namespace[namespace] :\n all_are_none = False\n current_filename += (self.STATE_BEGIN_CHARS[state]) + strnum + (self.STATE_END_CHARS[state])\n if index == self.get_max(namespace) :\n if got_all :\n current_filename += '#'\n else :\n current_filename += '+'\n all_are_none = False\n # print current_filename\n if (index%self.split_at == 0) or (index == max_by_namespace[namespace]) :\n if not(all_are_none) :\n if current_filename in self._files :\n files_not_to_remove.append(current_filename)\n else :\n files_to_write.append(current_filename)\n current_filename 
= self.get_prefix(namespace)\n all_are_none = True\n\n for filename in files_to_write :\n self.debug(\"create\", filename)\n self._serieos.touch(filename)\n\n for filename in self._files :\n if filename not in files_not_to_remove :\n self.debug(\"remove\", filename)\n self._serieos.unlink(filename)\n else:\n self.debug(\"keep\", filename)\n self._serieos.remove_exec(filename)\n\n def write_text(self, max_by_namespace) :\n if self._write_text :\n if any(max_by_namespace[namespace] is not None for namespace in self.get_namespaces()) :\n for namespace in self.get_namespaces() :\n if max_by_namespace[namespace] is not None :\n got_all = True\n\n digit_count = str(len(str(max_by_namespace[namespace])))\n if namespace != '' :\n subdir = self.get_subdir(namespace)\n if subdir is None :\n self._console.out('%s:' % (namespace,))\n else :\n self._console.out('%s (%s):' % (namespace, subdir))\n line = ''\n for index in xrange(1,max_by_namespace[namespace]+1) :\n strnum = ('%0'+digit_count+'d') % (index,)\n state = self._namespaces[namespace]['nums'].get(index,0)\n got_all = got_all and ((state & SerieState.GOT) != 0)\n\n state_char = ' [!$'[state]\n state_char_end = state_char.replace('[',']')\n line += '%s%s%s' % (state_char, strnum, state_char_end)\n if (index == max_by_namespace[namespace]) or (index % self.split_at == 0) :\n if index == self.get_max(namespace) :\n if got_all :\n line += ' ##'\n else :\n line += ' ++'\n self._console.out(line)\n line = ''\n else :\n line += ' '\n self._console.out('')\n\n def write_html(self, max_by_namespace) :\n if self._write_html or self._serieos.fileexists('serie.html') :\n if any(max_by_namespace[namespace] is not None for namespace in self.get_namespaces()) :\n with self._serieos.open('serie.html') as handle :\n handle.write('<!doctype html>\\n<html>\\n<head><style>\\nbody { background : #ffffff; }\\ntable { border : 1px solid #000000; margin-bottom: 10px; }\\ntd { font-family : calibri, sans-serif; font-size : 11px; font-weight : 
bold; width : 30px; height: 30px; text-align : center; }\\n.got { border : 1px solid #000000; }\\n.ungot { border : 1px solid #ffffff; }\\n.seen { background-color : #f8f; }\\n.unseen { }\\n.complete { border : 1px solid #000000; }\\n.uncomplete { border : 1px dotted #000000; }\\n.namespace { font-size : 1.4em; }\\n</style>\\n</head>\\n<body>\\n')\n for namespace in self.get_namespaces() :\n if max_by_namespace[namespace] is not None :\n digit_count = str(len(str(max_by_namespace[namespace])))\n handle.write('<table class=\"%s\">\\n' % ('complete' if self.get_max(namespace) is not None else 'uncomplete'))\n if namespace != '' :\n handle.write('<tr><td class=\"namespace\" colspan=\"20\">%s</td></tr>\\n' % (namespace))\n for index in xrange(1,max_by_namespace[namespace]+1) :\n if (index % self.split_at == 1) :\n handle.write('<tr>')\n strnum = ('%0'+digit_count+'d') % (index,)\n state = self._namespaces[namespace]['nums'].get(index,0)\n # got_all = got_all and ((state & SerieState.GOT) != 0)\n handle.write('<td class=\"%s %s\">%s</td>' % ('got' if state & SerieState.GOT else 'ungot','seen' if state & SerieState.SEEN else 'unseen',strnum))\n if (index == max_by_namespace[namespace]) or (index % self.split_at == 0) :\n handle.write('</tr>\\n')\n handle.write('</table>\\n')\n handle.write('</body>\\n</html>\\n')\n else :\n self._serieos.unlink('serie.html')\n\n def write(self) :\n max_by_namespace = {}\n for namespace in self.get_namespaces() :\n max_by_namespace[namespace] = self.get_max(namespace)\n if max_by_namespace[namespace] is None :\n if len(self._namespaces[namespace]['nums']) > 0 :\n max_by_namespace[namespace] = max(self._namespaces[namespace]['nums'].keys())\n else :\n pass\n self.write_files(max_by_namespace)\n self.write_html(max_by_namespace)\n self.write_text(max_by_namespace)\n\n def main(self, *argv) :\n self.scan()\n self.add_items(*argv)\n self.write()\n\n def add_items(self,*argv) :\n for item in argv :\n self.add_item(item)\n\n def add_item(self, 
item) :\n self.debug('item',item)\n if item == 'html' :\n self._write_html = True\n elif item == 'text' :\n self._write_text = True\n elif item in ('m','migration') :\n self._new_syntax = True\n elif item in ('f','flatten') :\n self.flatten()\n elif '~' in item :\n namespace, rawitem = item.split('~',1)\n self.add_link(namespace, rawitem)\n elif ':' in item :\n namespace, rawitem = item.rsplit(':',1)\n self.add_num_item(namespace, rawitem)\n elif '_' in item :\n namespace, rawitem = item.rsplit('_',1)\n self.add_num_item(namespace, rawitem)\n else :\n self.add_num_item('', item)\n\n def add_link(self, namespace, item) :\n namespace, nums = self.init_namespace(namespace)\n if item is not None and item != '' :\n item = item.replace('_',os.sep)\n item = item.strip(os.sep)\n self.set_subdir(namespace, item)\n self._serieos.mkdir(item)\n\n def add_num_item(self, namespace, item) :\n namespace, nums = self.init_namespace(namespace)\n if item[:1] == 'e' :\n item = item[1:]\n if item == '' :\n self.set_max(namespace, max(nums.keys()))\n else :\n self.set_max(namespace, int(item))\n else :\n states_infos = {\n '+' : (SerieState.GOT, True),\n '-' : (SerieState.GOT, False),\n 's' : (SerieState.SEEN, True),\n 'u' : (SerieState.SEEN, False),\n }\n states = []\n while item[:1] in states_infos.keys() :\n states.append(states_infos[item[:1]])\n item = item[1:]\n if len(states) == 0 :\n states.append(states_infos['+'])\n item_nums = []\n for element in item.split(\",\") :\n match = self.NUM_RE.match(element)\n if match is not None :\n item_nums.append(int(element))\n else :\n match = self.NUM_RANGE_RE.match(element)\n if match is not None :\n start,end = map(int,element.split('-',2))\n if end<start :\n start,end = end,start\n item_nums = item_nums + list(xrange(start,end+1))\n else :\n self.error(\"Can't understand [%s]\" % element)\n for num in item_nums :\n for state_change, state_change_add in states :\n if state_change_add :\n nums[num] = nums.get(num,SerieState.NONE) | 
state_change\n else :\n nums[num] = nums.get(num,SerieState.NONE) & ~state_change\n\n def flatten(self) :\n for namespace in self.get_namespaces() :\n self._namespaces[namespace]['subdir'] = None\n\nif __name__ == '__main__' :\n serieos = SerieOs()\n console_exporter = ConsoleExporter()\n Serie(serieos, console_exporter).main(*(sys.argv[1:]))\n\n" } ]
3
gitSergeyhab/Tula_networks
https://github.com/gitSergeyhab/Tula_networks
4eed6b75dccfe5c29d16f0c1f1bf1a2cf3b50078
f67863565ab506327be3e2da9484799da563ea4d
753dc158155ae48c877a1eda979cd31b935ea8e4
refs/heads/master
2023-07-01T18:08:25.625014
2021-08-08T17:11:00
2021-08-08T17:11:00
300,639,079
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4962993562221527, "alphanum_fraction": 0.5090460777282715, "avg_line_length": 28.301204681396484, "blob_id": "1b45c8f12e5dacc6f8c5a2e0bf2233789dd86786", "content_id": "878409827f3dd542f4cbdc07bd8e009e2a17563d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2721, "license_type": "no_license", "max_line_length": 74, "num_lines": 83, "path": "/Tula_Networks/tula_net/context_processors.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\n\n# def just_titles(request):\n# return {\n# 'tit_substations': 'ะŸะพะดัั‚ะฐะฝั†ะธะธ',\n# 'tit_subscribers': 'ะžั€ะณะฐะฝะธะทะฐั†ะธะธ',\n# 'tit_phones': 'ะขะตะปะตั„ะพะฝั‹',\n# 'tit_persons': 'ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝั‹ะต ะปะธั†ะฐ',\n# 'tit_sections': 'ะกะตะบั†ะธะธ',\n# 'tit_lines': 'ะ›ะธะฝะธะธ',\n# 'tit_feeders': 'ะคะธะดะตั€ั‹',\n# 'tit_substation': 'ะŸะพะดัั‚ะฐะฝั†ะธั',\n# 'tit_subscriber': 'ะžั€ะณะฐะฝะธะทะฐั†ะธั',\n# 'tit_phone': 'ะขะตะปะตั„ะพะฝ',\n# 'tit_person': 'ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝั‹ะพ ะปะธั†ะพ',\n# 'tit_section': 'ะกะตะบั†ะธั',\n# 'tit_line': 'ะ›ะธะฝะธั',\n# 'tit_feeder': 'ะคะธะดะตั€',\n# }\n\n\ndef add_titles(request):\n return {\n 'title': 'ะขัƒะปัŒัะบะธะต ะกะตั‚ะธ',\n 'title_add': 'ะ”ะพะฑะฐะฒะธั‚ัŒ',\n 'title_upd': 'ะ ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ',\n 'title_del': 'ะฃะดะฐะปะธั‚ัŒ',\n 'title_feeder': 'ั„ะธะดะตั€',\n 'title_substation': 'ะฟะพะดัั‚ะฐะฝั†ะธัŽ',\n 'title_subscriber': 'ะพั€ะณะฐะฝะธะทะฐั†ะธัŽ',\n 'title_phone': 'ั‚ะตะปะตั„ะพะฝ',\n 'title_person': 'ะปะธั†ะพ',\n 'title_section': 'ัะตะบั†ะธัŽ',\n 'title_line': 'ะปะธะฝะธัŽ',\n 'title_char': 'ั…ะฐั€ะฐะบั‚ะตั€ะธัั‚ะธะบะธ ั„ะธะดะตั€ะฐ',\n }\n\n\ndef logik(request):\n return {\n 'current_year': datetime.now().year,\n 'logik_metod': request.path[1:5],\n 'logik_obj': request.path[4:10],\n 'logik_add': 'add_',\n 'logik_upd': 'upd_',\n 'logik_del': 'del_',\n 'logik_feeder': '_feede',\n 'logik_substation': '_subst',\n 
'logik_subscriber': '_subsc',\n 'logik_phone': '_phone',\n 'logik_person': '_perso',\n 'logik_section': '_secti',\n 'logik_line': '_line/',\n 'logik_char': '_chara'\n }\n\n\ndef signs(request):\n return {\n 'context_menu': {\n 'substations': 'ะŸะพะดัั‚ะฐะฝั†ะธะธ',\n 'lines': 'ะ›ะธะฝะธะธ',\n 'sfeeders': 'ะคะธะดะตั€ั‹',\n 'subscribers': 'ะžั€ะณะฐะฝะธะทะฐั†ะธะธ',\n 'persons': ' ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝั‹ะต ะปะธั†ะฐ',\n 'phones': 'ะขะตะปะตั„ะพะฝั‹',\n 'map': 'L-MAP',\n 'y-map': 'Y-MAP',\n }\n }\n\n\ndef class_volt(request):\n return {\n 'feeder_6': \"btn btn-light border-success px-3 py-0 mx-1\",\n 'feeder_10': \"btn btn-light border-primary px-3 py-0 mx-1\",\n 'feeder_x': \"btn btn-secondary px-3 py-0 mx-1\",\n 'line_35': \"border-danger btn btn-dark py-0 px-4 m-1 vl-border\",\n 'line_110': 'border-warning btn btn-dark py-0 px-4 m-1 vl-border',\n 'line_x': 'btn btn-secondary py-0 px-4 m-1',\n\n }\n" }, { "alpha_fraction": 0.47782546281814575, "alphanum_fraction": 0.6952789425849915, "avg_line_length": 16.04878044128418, "blob_id": "32adfcbea3c393cfde57b5ccc7b5fa8bf06e81de", "content_id": "b670c6228816e2514662427731e9134956e19cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 699, "license_type": "no_license", "max_line_length": 32, "num_lines": 41, "path": "/requirements.txt", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": 
"asgiref==3.2.10\nastroid==2.4.2\nautopep8==1.5.4\ncertifi==2020.6.20\nchardet==3.0.4\ncolorama==0.4.3\ndefusedxml==0.6.0\ndiff-match-patch==20200713\nDjango==3.1.1\ndjango-autocomplete-light==3.5.1\ndjango-crispy-forms==1.9.2\ndjango-import-export==2.4.0\ndjango-silk==4.1.0\net-xmlfile==1.0.1\ngprof2dot==2019.11.30\nidna==2.10\nisort==5.6.3\njdcal==1.4.1\nJinja2==2.11.2\nlazy-object-proxy==1.4.3\nMarkupPy==1.14\nMarkupSafe==1.1.1\nmccabe==0.6.1\nodfpy==1.4.1\nopenpyxl==3.0.5\nprotobuf==3.13.0\npycodestyle==2.6.0\nPygments==2.7.1\npylint==2.6.0\npython-dateutil==2.8.1\npytz==2020.1\nPyYAML==5.3.1\nrequests==2.24.0\nsix==1.15.0\nsqlparse==0.3.1\ntablib==2.0.0\ntoml==0.10.1\nurllib3==1.25.11\nwrapt==1.12.1\nxlrd==1.2.0\nxlwt==1.3.0\n" }, { "alpha_fraction": 0.6012242436408997, "alphanum_fraction": 0.624151349067688, "avg_line_length": 29.050167083740234, "blob_id": "67d937d7aed100e29e6c44136bc51f55ffecfbaf", "content_id": "66415fb39b275d7943ebd05c1fb38903195e0926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9817, "license_type": "no_license", "max_line_length": 161, "num_lines": 299, "path": "/Tula_Networks/Tula_Networks/static/scripts/maps/map.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "//ัะพะฑะธั€ะฐะตั‚ JSON ะธะท ะฟะพะณะพ, ั‡ั‚ะพ ะพั‚ั€ะตะฝะดะตั€ะธะด django\nconst jsPsInfoD = document.querySelector('.js-ps-info-dict');\nconst jsonD1 = jsPsInfoD.textContent;\nconst jsonDataProto = \"{\" + jsonD1.slice(0, jsonD1.length-2) + \"}\";\nconst jsonData = JSON.parse(jsonDataProto);\n//---\n\n// ะฟะตั€ะตะฒะพะดะธั‚ ะบะพะพั€ะดะธะฝะฐั‚ั‹ ะฒ ั‚ะฐะบะธะต ะบะพั‚ะพั€ั‹ะต ั‡ะธั‚ะฐะตั‚ leaflet\nconst getCoordinate = (coo) => {\n const directions = ['E', 'W', 'N', 'S']\n\n if (directions.some((dir) => coo.endsWith(dir))) {\n const newCoo = coo.replace(/\\D/g, ' ').trim().split(' ').map((init) => +init);\n return newCoo[0] + newCoo[1]/60 + newCoo[2]/3600;\n }\n return 
+coo;\n}\n\nconst getTrueCoordinate = (coo) => {\n const [lat, ...rest] = coo.trim().split(' ');\n const lng = rest[rest.length-1]\n return [getCoordinate(lat), getCoordinate(lng)];\n};\n//---\n\n\n// ะทะฐะฑะธั€ะฐะตั‚ ะธะท ั€ะฐะฝะตะต ะฟะพะดะบะปัŽั‡ะตะฝะฝะพะณะพ coordinates.js ัะปะพะฒะฐั€ัŒ ะบะพะพั€ะดะธะฝะฐั‚, ะดะตะปะฐะตั‚ ะผะฐััะธะฒ: [ะฝะพะผะตั€, [ัˆะธั€ะพั‚ะฐ, ะดะพะปะณะพั‚ะฐ]]\n// .ั„ะธะปัŒั€ั€ัƒะตั‚ ะพั‚ ะฟัƒัั‚ั‹ั… ะบะพะพั€ะดะธะฝะฐั‚\n// .ะดะตะปะฐะตั‚ ะผะฐััะธะฒ ะพะฑัŠะตะบั‚ะพะฒ {ะฝะพะผะตั€, ะพั‚ั„ะพั€ะผะฐั‚ะธั€ะพะฒะฐะฝะฝั‹ะต ะบะพั€ะดะธะฝะฐั‚ั‹}\nconst allPoints = Object.entries(coordinates)\n .filter((point) => point[1] !== '')\n .map(point => ({number: +point[0], coordinate: getTrueCoordinate(point[1])}))\n\n// ัะตะดะธะฝัะตั‚ ะทะฝะฐั‡ะตะฝะธั ะฒ 2-ั… ะผะฐัะธะฒะพะฒ (ะธะท ะฟั€ะตะพะฑั€ะฐะทะพะฒะฐะฝะฝะพะณะพ coordinates.js ะธ ะธะท ั€ะฐัะฟะฐั€ัะตะฝะฝะพะณะพ ะดะถะตะนัะพะฝะฐ ะพั‚ั€ะตะฝะดะตั€ะตะฝะฝะพ ะดะถะฐะฝะณะพ)\nconst allBigPoints = allPoints.map(point => ({...point, ...jsonData[point.number]}));\n// console.log(allBigPoints)\n\n\n// LEAFLET\n\nconst openStreetMapTile = {\n png: 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',\n attribution: '&copy; <a href=\"https://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors',\n };\n\n// ะฟะพะฟะฐะฟ ะธ ะฟะธะฝั‹\n\nconst createPopup = (point) => `<a href=\"${point.url}\"><h3 class=\"text-center font-weight-bold\">ะŸะก ${point.voltage} ะบะ’ โ„– ${point.number} ${point.name} </h3></a>`\n\nconst icons = {\n // start: {\n // iconSize: [64, 64],\n // iconAnchor: [32, 64],\n // iconUrl: 'img/green.svg',\n // },\n // end: {\n // iconSize: [64, 64],\n // iconAnchor: [32, 64],\n // iconUrl: 'img/orange.svg',\n // },\n v35: {\n iconSize: [52, 52],\n iconAnchor: [26, 52],\n iconUrl: 'static/css/images/red.svg',\n },\n v110: {\n iconSize: [52, 52],\n iconAnchor: [26, 52],\n iconUrl: 'static/css/images/yellow.svg',\n },\n v220: {\n iconSize: [52, 52],\n iconAnchor: [26, 52],\n iconUrl: 
'static/css/images/blue.svg',\n },\n // other: {\n // iconSize: [32, 32],\n // iconAnchor: [16, 32],\n // iconUrl: 'img/black.svg',\n // },\n}\n\n\n\nconst pin220 = L.icon(icons.v220);\nconst pin110 = L.icon(icons.v110);\nconst pin35 = L.icon(icons.v35);\n// const pinOther = L.icon(icons.other);\n// const pinStart = L.icon(icons.start);\n// const pinEnd = L.icon(icons.end);\n\n//ะฒะพะทะฒั€ะฐั‰ะฐะตั‚ [ะบะพะพั€ะดะธะฝะฐั‚ั‹] ะŸะก ะฟะพ ะตะต ะฝะพะผะตั€ัƒ\nconst getPSCoordinate = (number) => allBigPoints.find(point => point.number === number).coordinate;\n\n\n\n// ะšะะ ะขะ\n\nconst map = L.map('map');\n\n\n// ะฒะพะทะฒั€ะฐั‰ะฐะตั‚ ะณั€ัƒะฟะฟัƒ ั‚ะพั‡ะตะบ ะฝะฐ ะบะฐั€ั‚ัƒ (ะผะฐััะธะฒ, ะฟะธะฝ) \nconst createMarkerGroup = (points, pin) => {\n const markerGroup = L.layerGroup().addTo(map)\n const createMarker = (point) => {\n const [lat, lng] = point.coordinate;\n const marker = L.marker({lat, lng}, {icon: pin});\n marker\n .addTo(markerGroup)\n .bindPopup(createPopup(point))\n };\n points.forEach((point) => createMarker(point));\n return markerGroup;\n}\n\nconst ps29 = getPSCoordinate(29); //ั‚ะพั‡ะบะฐ ะดะปั ั†ะตั‚ั€ะฐ ะบะฐั€ั‚ั‹\n\n//ั†ะตั‚ั€ ะบะฐั€ั‚ั‹\nmap.setView({\n lat: ps29[0],\n lng: ps29[1],\n}, 10);\n\n//ะบะพะฟะธั€ะฐะนั‚\nL.tileLayer(\n openStreetMapTile.png, {\n attribution: openStreetMapTile.attribution,\n },\n).addTo(map);\n\n\n// ะณั€ัƒะฟะฟั‹ ั‚ะพั‡ะตะบ ะดะปั ะฟะตั€ะฒะพะฝะฐั‡ะฐะปัŒะฝะพะณะพ ะพั‚ะพะฑั€ะฐะถะตะฝะธั ะฟั€ะธ ะทะฐะณั€ัƒะทะบะต\nconst points35 = allBigPoints.filter((p) => p.voltage === 35);\nconst points110 = allBigPoints.filter((p) => p.voltage === 110)\nconst points220 = allBigPoints.filter((p) => p.voltage === 220)\n// ะพะฑะฝะพะฒะปัะตะผั‹ะต ะณั€ัƒะฟะฟั‹ ะฝะฐ ะบะฐั€ั‚ะต\nlet mg110 = createMarkerGroup(points110, pin110);\nlet mg220 = createMarkerGroup(points220, pin220);\nlet mg35 = createMarkerGroup(points35, pin35);\n\n\n//// ะ‘ะปะพะบ ั‡ะตะบะฑะพะบัะพะฒ\n\nconst boxesWrapper = document.querySelector('.checkboxes-wrapper')\nconst allPsCB = 
boxesWrapper.querySelector('#all');\n\nconst voltageFieldset = boxesWrapper.querySelector('.map_substations_checkbox--voltage');\nconst subVoltBoxes = voltageFieldset.querySelectorAll('input');\n\nconst eachPSFieldset = boxesWrapper.querySelector('.map_substations_checkbox');\nconst subBoxes = eachPSFieldset.querySelectorAll('input');\n\n// ั„ะธะปัŒั‚ั€ัƒะตั‚ ั‚ะพั‡ะบะธ ะธะท ะผะฐััะธะฒะฐ ะฟะพ ั‡ะตะบะฝัƒั‚ั‹ะผ ั‡ะตะบะฑะพะบัะฐะผ ะธ ะฒะพะทะฒั€ะฐั‰ะฐะตั‚ ะพะฑัŠะตะบั‚ {ps35: [ะฒัะต ั‡ะตะบะฝัƒั‚ั‹ะต ะŸะก 35 ะบะ’], ps110: [...], ...}\nconst filterPoints = () => {\n const checkedIds = Array.from(subBoxes).filter((box) => box.checked).map((box) => +box.id);\n const checkedPoints = allBigPoints.filter((point) => checkedIds.some((id) => id === point.number))\n const getCheckedByVolt = (arr, volt) => arr.filter((point) => point.voltage === volt);\n return {\n ps35: getCheckedByVolt(checkedPoints, 35),\n ps110: getCheckedByVolt(checkedPoints, 110),\n ps220: getCheckedByVolt(checkedPoints, 220),\n }\n}\n\n// ะผะตะฝัะตั‚ ะฒะธะด ั‡ะตะบะฑะพะบัะฐ ะฒ ะทะฐะฒะธัะธะผะพัั‚ะธ ะพั‚ ั‡ะตะบะฝัƒั‚ะพัั‚ะธ\nconst changeBoxActive = (box) => {\n const parent = box.closest('label')\n if (box.checked) {\n parent.classList.add('active');\n } else {\n parent.classList.remove('active');\n }\n}\n\nconst resetEachBox = (boxes) => boxes.forEach((box) => {\n box.checked = false;\n changeBoxActive(box);\n});\n\n\n// ะžะ‘ะ ะะ‘ะžะขะงะ˜ะšะ˜ ะะ ะคะ˜ะ›ะ”ะกะ•ะขะซ ะงะ•ะšะ‘ะžะšะกะžะ’ ะธ ั‡ะตะบะฑะพะบั ะ’ะกะ• ะŸะก\neachPSFieldset.addEventListener('change', (evt) => {\n if (evt.target && evt.target.classList.contains('each-ps')) {\n changeBoxActive(evt.target);\n resetEachBox([allPsCB]);\n resetEachBox(subVoltBoxes);\n }\n})\n\n\nallPsCB.addEventListener('change', () => {\n const parent = allPsCB.closest('label')\n if (allPsCB.checked) {\n parent.classList.add('active');\n subVoltBoxes.forEach((box) => box.checked = true);\n subBoxes.forEach((box) => box.checked = true);\n } else {\n 
parent.classList.remove('active');\n subVoltBoxes.forEach((box) => box.checked = false);\n subBoxes.forEach((box) => box.checked = false);\n }\n subVoltBoxes.forEach((box) => changeBoxActive(box));\n subBoxes.forEach((box) => changeBoxActive(box));\n})\n\n\nvoltageFieldset.addEventListener('change', (evt) => {\n const target = evt.target;\n if (target && target.classList.contains('each-volt')) {\n resetEachBox([allPsCB]);\n changeBoxActive(target);\n\n const voltages = [];\n subVoltBoxes.forEach((box_v) => {\n if (box_v.checked) {\n voltages.push(box_v.id)\n }\n })\n \n subBoxes.forEach((subBox) => {\n if (voltages.some((voltage) => subBox.dataset.volt === voltage)) {\n subBox.checked = true;\n } else {\n subBox.checked = false;\n }\n changeBoxActive(subBox)\n })\n }\n})\n\n\n//// ะžะ‘ะ ะะ‘ะžะงะ˜ะš ะฝะฐ ะ”ะ˜ะ’ ะกะž ะ’ะกะ•ะœะ˜ ะงะ•ะšะ‘ะžะšะกะะœะ˜ ะŸะก\n\nconst reload = document.querySelector('.checkboxes-block');\n\nreload.addEventListener('click', (evt) => {\n // console.log(evt.target)\n setTimeout(() => {\n const points = filterPoints();\n mg35.remove()\n mg110.remove()\n mg220.remove()\n mg35 = createMarkerGroup(points.ps35, pin35);\n mg110 = createMarkerGroup(points.ps110, pin110);\n mg220 = createMarkerGroup(points.ps220, pin220);\n }, 10)\n\n})\n\n\n// ะœะะ ะจะ ะฃะขะซ\n\nconst from = document.querySelector('#from');\nconst to = document.querySelector('#to');\nconst findRout = document.querySelector('#find-rout');\nconst resetRout = document.querySelector('#reset-rout');\n\n// ะพะฑะฝะพะฒะปัะตะผั‹ะน ะผะฐั€ัˆั€ัƒั‚ ะธ ะตะณะพ ัƒะฑะธะฒะตั†\nlet rout;\nconst delRout = (rout) => rout ? 
rout.remove() : null;\n\n\n// ะฒะพะทะฒั€ะฐั‰ะฐะตั‚ ะผะฐั€ัˆั€ัƒั‚ ะฟะพ ะฝะพะผะตั€ะฐะผ ะŸะก\nconst createRout = (numFrom, numTo) => {\n const rout = L.Routing.control({\n waypoints: [\n L.latLng(...getPSCoordinate(numFrom)),\n L.latLng(...getPSCoordinate(numTo))\n ]\n }).addTo(map);\n if (numFrom !== numTo) {\n return rout;\n }\n};\n\n\n// ะพะฑั€ะฐะฑะพั‚ั‡ะธะบะธ ะบะฝะพะฟะพะบ ัะพะทะดะฐั‚ัŒ ะธ ัะฑั€ะพัะธั‚ัŒ ะผะฐั€ัˆั€ัƒั‚\n\nfindRout.addEventListener('click', () => {\n const fromValue = +from.value;\n const toValue = +to.value;\n if (coordinates[fromValue] && coordinates[toValue]) {\n // console.log(coordinates[fromValue], coordinates[fromValue])\n delRout(rout);\n rout = createRout(fromValue, toValue);\n } else {\n findRout.textContent = 'ะะต ะฟะพะตะดัƒ...';\n findRout.classList.add('text-dark', 'font-weight-bold');\n\n setTimeout(() => {\n findRout.textContent = 'ะŸะพัั‚ั€ะพะธั‚ัŒ ะผะฐั€ัˆั€ัƒั‚';\n findRout.classList.remove('text-dark', 'font-weight-bold');\n }, 1000)\n }\n \n})\n\n\nresetRout.addEventListener('click', () => delRout(rout))\n" }, { "alpha_fraction": 0.5599920749664307, "alphanum_fraction": 0.5786650776863098, "avg_line_length": 34.9571418762207, "blob_id": "50f11306c2f86ea0ab3ce38d08f849a459cffbe8", "content_id": "6d0f038b2c634c87615688a9615a070a5032efdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5451, "license_type": "no_license", "max_line_length": 119, "num_lines": 140, "path": "/Tula_Networks/Tula_Networks/static/scripts/maps/y-map.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "const ClasseBS = {\n ROUT: ['d-flex', 'justify-content-center'],\n BOXES: ['checkboxes-wrapper', 'd-flex', 'justify-content-center'],\n NONE: ['d-none'],\n};\n\nColor = {\n ps35: 'danger',\n ps110: 'warning',\n ps220: 'primary',\n other: 'dark',\n}\n\nconst presets = { // ัั‚ะธะปะธ ั‚ะพั‡ะบะธ ะฝะฐ ะบะฐั€ั‚ะต\n ps35: 'islands#redStretchyIcon',\n ps110: 
'islands#yellowStretchyIcon',\n ps220: 'islands#darkBlueStretchyIcon',\n other: 'islands#blackStretchyIcon',\n}\n\nconst routInput = document.querySelector('.rout-inputs');\n\nconst from = document.querySelector('#from');\nconst to = document.querySelector('#to');\nconst findRout = document.querySelector('#find-rout');\nconst routForm = document.querySelector('#rout-form')\n\n\nymaps.ready(function () {\n\n routInput.classList.add(...ClasseBS.ROUT);\n routInput.classList.remove(...ClasseBS.NONE);\n boxesWrapper.classList.add(...ClasseBS.BOXES);\n boxesWrapper.classList.remove(...ClasseBS.NONE);\n \n const myMap = new ymaps.Map('y-map', {\n center: getPSCoordinate(29),\n zoom: 9,\n controls: ['routePanelControl']\n });\n\n // ะŸะพะปัƒั‡ะตะฝะธะต ััั‹ะปะบะธ ะฝะฐ ะฟะฐะฝะตะปัŒ ะผะฐั€ัˆั€ัƒั‚ะธะทะฐั†ะธะธ.\n const control = myMap.controls.get('routePanelControl');\n\n\n let collection35;\n let collection110;\n let collection220;\n\n const addCollectionOnMap = () => {\n // ะฟั€ะพะฒะตั€ัะตั‚, ัะพะทะดะฐะฝะฐ ะปะธ ัƒะถะต ะบะพะปะตะบั†ะธั ะธ, ะตัะปะธ ะดะฐ, ัƒะดะฐะปัะตั‚ ะตะต ั ะบะฐั€ั‚ั‹\n const checkDellCollections = (collection) => collection ? 
myMap.geoObjects.remove(collection) : null;\n\n checkDellCollections(collection35);\n checkDellCollections(collection110);\n checkDellCollections(collection220);\n\n collection35 = new ymaps.GeoObjectCollection();\n collection110 = new ymaps.GeoObjectCollection();\n collection220 = new ymaps.GeoObjectCollection();\n\n const createPoint = (ps, style, color) => {\n const myGeoObject = new ymaps.GeoObject({\n // ะžะฟั€ะตะดะตะปะตะฝะธะต ะณะตะพะผะตั‚ั€ะธะธ\" Point\".\n geometry: {\n type: \"Point\",\n coordinates: ps.coordinate,\n },\n // ะžะฟั€ะตะดะตะปะตะฝะธะต ะดะฐะฝะฝั‹ั… ะณะตะพะพะฑัŠะตะบั‚ะฐ.\n properties: {\n iconContent: `<b>${ps.number}</b>`,\n hintContent: `<h5 class=\"m-0 p-0\">${ps.name}</h5>`,\n // iconCaption: ps.url, \n // balloonContentHeader: ps.name,\n balloonContentBody: `<a href=${ps.url}>\n <h4 class=\"text-dark text-center m-2\"><b>\n ะŸะก <span class=\"text-${color}\"> ${ps.voltage} ะบะ’ </span> โ„– ${ps.number} <br> ${ps.name}</b>\n </h4>\n </a>`,\n }\n }, {\n // ะฃัั‚ะฐะฝะพะฒะบะฐ ะฟั€ะตะดัƒัั‚ะฐะฝะพะฒะบะธ ะดะปั ะผะตั‚ะบะธ ั ั‚ะพั‡ะบะพะน ะธ ะฑะตะท ัะพะดะตั€ะถะธะผะพะณะพ.\n preset: style,\n // ะ’ะบะปัŽั‡ะตะฝะธะต ะฟะตั€ะตั‚ะฐัะบะธะฒะฐะฝะธั.\n draggable: false,\n // ะžั‚ะบะปัŽั‡ะตะฝะธะต ะทะฐะดะตั€ะถะบะธ ะดะปั ะทะฐะบั€ั‹ั‚ะธั ะฒัะฟะปั‹ะฒะฐัŽั‰ะตะน ะฟะพะดัะบะฐะทะบะธ.\n hintCloseTimeout: null\n });\n return myGeoObject;\n }\n\n // ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะพั‡ะตะบ ะฒ ะบะพะปะปะตะบั†ะธัŽ (ะบะพะปะปะตะบั†ะธั, ะผะฐััะธะฒ ั ะฒั‹ะฑั€ะฐะฝะฝั‹ะผะธ ะŸะก, ัั‚ะธะปัŒ ะดะปั ั‚ะพั‡ะตะบ ะฒ ะบะพะปะปะตะบั†ะธะธ)\n const addPointsToCollection = (collection, substations, style, color) => {\n substations.forEach((ps) => {\n const point = createPoint(ps, style, color);\n collection.add(point);\n })\n }\n\n addPointsToCollection(collection35, filterPoints().ps35, presets.ps35, Color.ps35);\n addPointsToCollection(collection110, filterPoints().ps110, presets.ps110, Color.ps110);\n addPointsToCollection(collection220, filterPoints().ps220, presets.ps220, 
Color.ps220);\n\n myMap.geoObjects.add(collection35);\n myMap.geoObjects.add(collection110);\n myMap.geoObjects.add(collection220);\n }\n\n\n eachPSFieldset.addEventListener('change', addCollectionOnMap);\n allPsCB.addEventListener('change', addCollectionOnMap);\n voltageFieldset.addEventListener('change', addCollectionOnMap);\n\n let lastControl;\n const setRout = (from, to) => {\n lastControl = control.routePanel.state.set({\n // ะะดั€ะตั ะฝะฐั‡ะฐะปัŒะฝะพะน ั‚ะพั‡ะบะธ.\n from: getPSCoordinate(from),\n // ะะดั€ะตั ะบะพะฝะตั‡ะฝะพะน ั‚ะพั‡ะบะธ.\n to: getPSCoordinate(to),\n });\n }\n\n routForm.addEventListener('submit', (evt) => {\n evt.preventDefault();\n const fromValue = +from.value;\n const toValue = +to.value;\n if (coordinates[fromValue] && coordinates[toValue]) {\n setRout(fromValue, toValue);\n } else {\n findRout.textContent = 'ะะต ะฟะพะตะดัƒ...';\n findRout.classList.add('text-dark', 'font-weight-bold');\n \n setTimeout(() => {\n findRout.textContent = 'ะŸะพัั‚ั€ะพะธั‚ัŒ ะผะฐั€ัˆั€ัƒั‚';\n findRout.classList.remove('text-dark', 'font-weight-bold');\n }, 1000)\n }\n })\n});\n" }, { "alpha_fraction": 0.7444444298744202, "alphanum_fraction": 0.7444444298744202, "avg_line_length": 17, "blob_id": "415e0f284682a4096df692720b3d9587bbbdf6ad", "content_id": "d956034d7efd28abe0c933f7e5ca95bfcab69c6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/Tula_Networks/tula_net/apps.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass TulaNetConfig(AppConfig):\n name = 'tula_net'\n" }, { "alpha_fraction": 0.6649179458618164, "alphanum_fraction": 0.6704081296920776, "avg_line_length": 46.239192962646484, "blob_id": "bf42b00ebbd512e9c92d9356ea5b1392d9ef6403", "content_id": "2ebb858c17530b2fa989fe97119acc0a47bf52a2", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 17693, "license_type": "no_license", "max_line_length": 121, "num_lines": 347, "path": "/Tula_Networks/tula_net/models.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.urls import reverse\n\n\nclass Region(models.Model):\n name = models.CharField(max_length=128, verbose_name='ะ ะตะณะธะพะฝ', unique=True)\n for_menu = models.BooleanField(default=False, verbose_name='ะ”ะพะฑะฐะฒะธั‚ัŒ ะฒ ะผะตะฝัŽ')\n\n def get_line_url(self):\n return reverse('line_region', kwargs={'pk': self.pk})\n\n class Meta:\n verbose_name = \"ะ ะตะณะธะพะฝ\"\n verbose_name_plural = \"ะ ะตะณะธะพะฝ\"\n\n def __str__(self):\n return self.name\n\n\nclass ClassVoltage(models.Model):\n class_voltage = models.SmallIntegerField(verbose_name='ะšะปะฐัั ะฝะฐะฟั€ัะถะตะฝะธั')\n\n def get_absolute_url(self):\n return reverse('voltage', kwargs={'pk': self.pk})\n\n def get_line_url(self):\n return reverse('line_voltage', kwargs={'pk': self.pk})\n\n class Meta:\n verbose_name = \"ะฝะฐะฟั€ัะถะตะฝะธะต\"\n verbose_name_plural = \"ะฝะฐะฟั€ัะถะตะฝะธั\"\n ordering = ['-class_voltage']\n\n def __str__(self):\n return ' '.join((str(self.class_voltage), 'ะบะ’'))\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=64, verbose_name='ะ“ั€ัƒะฟะฟะฐ', unique=True)\n location = models.TextField(verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต', blank=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n ours = models.BooleanField(verbose_name='ะฝะฐัˆะธ', blank=True, null=True)\n\n def get_absolute_url(self):\n return reverse('group', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะ“ั€ัƒะฟะฟะฐ\"\n verbose_name_plural = \"ะ“ั€ัƒะฟะฟั‹\"\n ordering = ['-ours', 'name']\n\n\nclass GroupLine(models.Model):\n name = models.CharField(max_length=64, verbose_name='ะ“ั€ัƒะฟะฟะฐ', unique=True)\n location = 
models.TextField(verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต', blank=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n ours = models.BooleanField(verbose_name='ะฝะฐัˆะธ', blank=True, null=True)\n\n def get_absolute_url(self):\n return reverse('line_group', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›\"\n verbose_name_plural = \"ะฃั‡ะฐัั‚ะบะธ ัะป ะ’ะ›\"\n ordering = ['-ours', 'name']\n\n\nclass Res(models.Model):\n name = models.CharField(max_length=32, verbose_name='ะ ะญะก', unique=True)\n short_name = models.CharField(max_length=16, verbose_name='ะ ะญะก ัะพะบั€ะฐั‰ + ัƒั‡ะฐัั‚ะพะบ', unique=True)\n location = models.TextField(verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต', blank=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n\n def get_absolute_url(self):\n return reverse('res', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.short_name\n\n class Meta:\n verbose_name = \"ะ ะญะก\"\n verbose_name_plural = \"ะ ะญะกั‹\"\n ordering = ['name']\n\n\nclass Substation(models.Model):\n number = models.PositiveSmallIntegerField(verbose_name='ะะพะผะตั€ ะŸะก', unique=True)\n name = models.CharField(max_length=32, verbose_name='ะะฐะทะฒะฐะฝะธะต ะŸะก', unique=True)\n voltage_h = models.ForeignKey(\n ClassVoltage, verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ะฒั‹ัะพะบะพะต', related_name='ps_volt_h',\n blank=True, null=True, on_delete=models.PROTECT)\n voltage_m = models.ForeignKey(\n ClassVoltage, verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ัั€ะตะดะฝะตะต', related_name='ps_volt_m',\n blank=True, null=True, on_delete=models.PROTECT)\n voltage_l = models.ForeignKey(\n ClassVoltage, verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ะฝะธะทะบะพะต', related_name='ps_volt_l',\n blank=True, null=True, on_delete=models.PROTECT)\n alien = models.BooleanField(verbose_name='ั‡ัƒะถะฐั?')\n owner = models.ForeignKey('Subscriber', related_name='substations', 
verbose_name='ะ’ะปะฐะดะตะปะตั†',\n on_delete=models.SET_NULL, blank=True, null=True)\n group = models.ForeignKey(Group, related_name='substations', verbose_name='ะ“ั€ัƒะฟะฟะฐ',\n on_delete=models.CASCADE)\n location = models.TextField(verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต', blank=True)\n region = models.ForeignKey(Region, verbose_name='ะฃั‡ะฐัั‚ะพะบ', related_name='substations',\n on_delete=models.SET_NULL, blank=True, null=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n\n def get_absolute_url(self):\n return reverse('substation', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะŸะพะดัั‚ะฐะฝั†ะธั\"\n verbose_name_plural = \"ะŸะพะดัั‚ะฐะฝั†ะธะธ\"\n # ordering = ['-voltage_h', 'name']\n ordering = ['number']\n\n\nclass Section(models.Model):\n substation = models.ForeignKey(Substation, related_name='sections', on_delete=models.CASCADE)\n number = models.PositiveSmallIntegerField(verbose_name='โ„– ัะตะบั†ะธะธ', blank=True, null=True)\n name = models.CharField(max_length=32, verbose_name='ะะฐะทะฒะฐะฝะธะต ัะตะบั†ะธะธ')\n voltage = models.ForeignKey(ClassVoltage, verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต', on_delete=models.PROTECT, blank=True,\n null=True)\n from_T = models.PositiveSmallIntegerField(verbose_name='ะฟะธั‚ะฐะตั‚ัั ะพั‚/ะฟะธั‚ะฐะตั‚ ะข โ„–', blank=True, null=True)\n blind = models.BooleanField(default=False, verbose_name='ะขัƒะฟะธะบะพะฒะฐั')\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n\n def get_absolute_url(self):\n return reverse('one_section', kwargs={'pk': self.pk})\n\n def __str__(self):\n return ' '.join((str(self.name), 'ะŸะก', str(self.substation)))\n\n class Meta:\n verbose_name = \"ะกะตะบั†ะธั\"\n verbose_name_plural = \"ะกะตะบั†ะธะธ\"\n ordering = ['voltage__class_voltage', 'name']\n\n\n\nclass Subscriber(models.Model):\n name = models.CharField(max_length=128, verbose_name='ะะฐะทะฒะฐะฝะธะต ะพั€ะณะฐะฝะธะทะฐั†ะธะธ', 
unique=True)\n short_name = models.CharField(max_length=16, verbose_name='ะะฐะทะฒ ะพั€ะณะฐะฝ ัะพะบั€ะฐั‰', blank=True, null=True)\n ours = models.BooleanField(verbose_name='ะฝะฐัˆะธ')\n year_update = models.PositiveSmallIntegerField(verbose_name='ะกะฟะธัะบะธ ะพะฑะฝะพะฒะปะตะฝั‹', blank=True, null=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n region = models.ForeignKey(\n Region, verbose_name='ัƒั‡ะฐัั‚ะพะบ', default=2, blank=True, null=True, on_delete=models.SET_NULL)\n\n def get_absolute_url(self):\n return reverse('subscriber', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะพั€ะณะฐะฝะธะทะฐั†ะธั\"\n verbose_name_plural = \"ะพั€ะณะฐะฝะธะทะฐั†ะธะธ\"\n ordering = ['-ours', 'name']\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=128, verbose_name='ะคะ˜ะž')\n subscriber = models.ForeignKey(Subscriber, related_name='persons', on_delete=models.CASCADE)\n position = models.CharField(max_length=64, verbose_name='ะดะพะปะถะฝะพัั‚ัŒ', blank=True, null=True)\n priority = models.PositiveSmallIntegerField(blank=True, verbose_name='ะฟั€ะธะพั€ะธั‚ะตั‚', null=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True)\n\n # mail = models.EmailField(max_length=32, verbose_name='ัะปะตะบั‚ั€ะพะฝะบะฐ', blank=True)\n\n def get_absolute_url(self):\n return reverse('person', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝะพะต ะปะธั†ะพ\"\n verbose_name_plural = \"ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝั‹ะต ะปะธั†ะฐ\"\n ordering = ['-priority']\n\n\nclass Feeder(models.Model):\n name = models.CharField(max_length=128, verbose_name='ะะฐะทะฒะฐะฝะธะต ั„ะธะดะตั€ะฐ')\n try_number_name = models.SmallIntegerField(blank=True, null=True)\n substation = models.ForeignKey(Substation, related_name='feeders', on_delete=models.CASCADE, verbose_name='ะŸะก')\n section = models.ForeignKey(Section, 
related_name='feeders', on_delete=models.CASCADE, verbose_name='ะกะบะจ',\n blank=True, null=True)\n subscriber = models.ForeignKey(Subscriber, related_name='feeders', on_delete=models.SET_NULL,\n verbose_name='ะฐะฑะพะฝะตะฝั‚', blank=True, null=True)\n attention = models.BooleanField(verbose_name='!!!', default=False)\n in_reserve = models.BooleanField(default=False, verbose_name='ะ ะตะทะตั€ะฒะฝั‹ะน')\n region = models.ForeignKey(Region, verbose_name='ะฃั‡ะฐัั‚ะพะบ', related_name='feeders',\n on_delete=models.SET_NULL, blank=True, null=True)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True, null=True)\n # res = models.CharField(blank=True, max_length=32, verbose_name='ะ ะญะก', null=True)\n # reliability_category = models.PositiveSmallIntegerField(blank=True, verbose_name='ะบะฐั‚ะตะณะพั€ะธั ะฝะฐะดะตะถะฝะพัั‚ะธ', null=True)\n\n def get_absolute_url(self):\n return reverse('feeder', kwargs={'pk': self.pk})\n\n def __str__(self):\n return ' '.join(('ั„ะธะด', str(self.name), 'ะŸะก', str(self.substation)))\n\n class Meta:\n verbose_name = \"ั„ะธะดะตั€\"\n verbose_name_plural = \"ั„ะธะดะตั€ะฐ\"\n ordering = ['in_reserve', 'section', 'try_number_name', 'name']\n\n\nclass Phone(models.Model):\n number = models.CharField(max_length=20, verbose_name='ะฝะพะผะตั€')\n search_number = models.CharField(max_length=16, verbose_name='ะะ• ะ—ะะŸะžะ›ะะฏะขะฌ', blank=True, null=True)\n subscriber = models.ForeignKey(Subscriber, related_name='phones', on_delete=models.CASCADE,\n blank=True, null=True, verbose_name='ะพั€ะณะฐะฝะธะทะฐั†ะธั')\n person = models.ForeignKey(Person, related_name='phones', on_delete=models.CASCADE,\n blank=True, null=True, verbose_name='ะปะธั†ะพ')\n substation = models.ForeignKey(Substation, related_name='phones', on_delete=models.CASCADE,\n blank=True, null=True, verbose_name='ะŸะก')\n priority = models.PositiveSmallIntegerField(blank=True, verbose_name='ะฟั€ะธะพั€ะธั‚ะตั‚', null=True)\n description = 
models.TextField(verbose_name='ะพะฟะธัะตะฝะธะต', blank=True)\n\n def get_absolute_url(self):\n return reverse('phone', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.number\n\n class Meta:\n verbose_name = \"ะขะตะปะตั„ะพะฝ\"\n verbose_name_plural = \"ะขะตะปะตั„ะพะฝั‹\"\n ordering = ['-priority']\n\n\nclass TransmissionLine(models.Model):\n name = models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต')\n full_name = models.CharField(max_length=128, verbose_name='ะŸะพะปะฝะพะต ะฝะฐะทะฒะฐะฝะธะต', blank=True, null=True)\n short_name = models.CharField(max_length=32, verbose_name='ะฆะธั„ั€ะพะฒะพะต ะฝะฐะทะฒะฐะฝะธะต', blank=True, default='')\n section = models.ManyToManyField(Section, verbose_name='ะกะตะบั†ะธั', related_name='lines0')\n voltage = models.ForeignKey(ClassVoltage, verbose_name='ะะฐะฟั€ัะถะตะฝะธะต', related_name='lines0',\n on_delete=models.PROTECT)\n management = models.ForeignKey(Region, verbose_name='ะฃะฟั€ะฐะฒะปะตะฝะธะต', related_name='lines_upr0',\n on_delete=models.CASCADE, default=2)\n maintenance = models.ManyToManyField(Region, verbose_name='ะ’ะตะดะตะฝะธะต', related_name='lines_ved0', blank=True)\n subscriber = models.ForeignKey(Subscriber, related_name='lines0', on_delete=models.SET_NULL,\n verbose_name='ะฐะฑะพะฝะตะฝั‚', blank=True, null=True)\n length = models.FloatField(verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ', blank=True, null=True)\n number_columns = models.PositiveSmallIntegerField(verbose_name='ะšะพะปะธั‡ะตัั‚ะฒะพ ะพะฟะพั€', blank=True, null=True)\n group = models.ForeignKey(GroupLine, related_name='lines0', verbose_name='ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›',\n on_delete=models.CASCADE, default=1)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True, null=True)\n kvl = models.BooleanField(verbose_name='ะšะ’ะ›?', default=False)\n\n def get_absolute_url(self):\n return reverse('line0', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะ›ะธะฝะธั - 
ะœะžะ”ะ•ะ›ะฌ ะ—ะะœะ•ะะ•ะะ\"\n verbose_name_plural = \"ะ›ะธะฝะธะธ - ะœะžะ”ะ•ะ›ะฌ ะ—ะะœะ•ะะ•ะะ\"\n ordering = ['voltage', 'short_name']\n\n\nclass Line(models.Model):\n name = models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต')\n full_name = models.CharField(max_length=128, verbose_name='ะŸะพะปะฝะพะต ะฝะฐะทะฒะฐะฝะธะต', blank=True, null=True)\n short_name = models.CharField(max_length=32, verbose_name='ะฆะธั„ั€ะพะฒะพะต ะฝะฐะทะฒะฐะฝะธะต', blank=True, default='')\n ps_p1 = models.PositiveSmallIntegerField('ะŸะก โ„–1')\n sec_p1 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–1')\n ps_p2 = models.PositiveSmallIntegerField('ะŸะก โ„–2', blank=True, null=True)\n sec_p2 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–2', blank=True, null=True)\n ps_m1 = models.PositiveSmallIntegerField('ะŸะก โ„–3', blank=True, null=True)\n sec_m1 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–3', blank=True, null=True)\n ps_m2 = models.PositiveSmallIntegerField('ะŸะก โ„–4', blank=True, null=True)\n sec_m2 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–4', blank=True, null=True)\n ps_m3 = models.PositiveSmallIntegerField('ะŸะก โ„–5', blank=True, null=True)\n sec_m3 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–5', blank=True, null=True)\n ps_m4 = models.PositiveSmallIntegerField('ะŸะก โ„–6', blank=True, null=True)\n sec_m4 = models.PositiveSmallIntegerField('ะกะจ ะŸะก โ„–6', blank=True, null=True)\n voltage = models.ForeignKey(ClassVoltage, verbose_name='ะะฐะฟั€ัะถะตะฝะธะต', related_name='lines', on_delete=models.PROTECT)\n management = models.ForeignKey(Region, verbose_name='ะฃะฟั€ะฐะฒะปะตะฝะธะต', related_name='lines_upr',\n on_delete=models.CASCADE, default=2)\n maintenance = models.ManyToManyField(Region, verbose_name='ะ’ะตะดะตะฝะธะต', related_name='lines_ved', blank=True)\n subscriber = models.ForeignKey(Subscriber, related_name='lines', on_delete=models.SET_NULL,\n verbose_name='ะฐะฑะพะฝะตะฝั‚', blank=True, null=True)\n length = 
models.FloatField(verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ', blank=True, null=True)\n number_columns = models.PositiveSmallIntegerField(verbose_name='ะšะพะปะธั‡ะตัั‚ะฒะพ ะพะฟะพั€', blank=True, null=True)\n group = models.ForeignKey(GroupLine, related_name='lines', verbose_name='ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›',\n on_delete=models.CASCADE, default=1)\n description = models.TextField(verbose_name='ะžะฟะธัะตะฝะธะต', blank=True, null=True)\n kvl = models.BooleanField(verbose_name='ะšะ’ะ›?', default=False)\n\n def get_absolute_url(self):\n return reverse('line', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"ะ›ะธะฝะธั\"\n verbose_name_plural = \"ะ›ะธะฝะธะธ\"\n ordering = ['voltage', 'short_name']\n\n\nclass Feeder_characteristic(models.Model):\n feeder_name = models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต ั„ะธะดะตั€ะฐ')\n substation_name = models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต ะŸะก')\n feeder = models.OneToOneField(Feeder, related_name=\"character\", on_delete=models.SET_NULL,\n null=True, blank=True, verbose_name='ั„ะธะดะตั€')\n length = models.FloatField(verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ', blank=True, null=True)\n tp_our_num = models.PositiveSmallIntegerField(blank=True, verbose_name='ะขะŸ ะฝะฐัˆะธ: ะบะพะปะธั‡ะตัั‚ะฒะพ', null=True)\n tp_alien_num = models.PositiveSmallIntegerField(blank=True, verbose_name='ะขะŸ ั‡ัƒะถะธะต: ะบะพะปะธั‡ะตัั‚ะฒะพ', null=True)\n villages_num = models.PositiveSmallIntegerField(blank=True, verbose_name='ะะŸ ะบะพะปะธั‡ะตัั‚ะฒะพ', null=True)\n villages_names = models.TextField(blank=True, verbose_name='ะะŸ ะฝะฐะทะฒะฐะฝะธั', null=True)\n power_winter = models.FloatField(verbose_name='ะ—ะธะผะฐ ะœะ’ั‚', blank=True, null=True)\n power_summer = models.FloatField(verbose_name='ะ›ะตั‚ะพ ะœะ’ั‚', blank=True, null=True)\n population = models.PositiveSmallIntegerField(blank=True, verbose_name='ะะฐัะตะปะตะฝะธะต', null=True)\n points = 
models.PositiveSmallIntegerField(blank=True, verbose_name='ะขะพั‡ะบะธ ะฟะพัั‚ะฐะฒะบะธ', null=True)\n social_num = models.PositiveSmallIntegerField(blank=True, verbose_name='ะกะพั†ะธะฐะปะบะฐ ะบะพะป-ะฒะพ', null=True)\n social_names = models.TextField(blank=True, verbose_name='ะกะพั†ะธะฐะปะบะฐ ั‚ะธะฟ', null=True)\n checked = models.BooleanField(verbose_name='ัะพะพั‚ะฒะตั‚ัั‚ะฒัƒะตั‚', default=False)\n\n def get_absolute_url(self):\n return reverse('feeder_char', kwargs={'pk': self.pk})\n\n def get_upd_url(self):\n return reverse('upd_charact_fl', kwargs={'pk': self.pk})\n\n\n def __str__(self):\n return ' '.join(('ะŸะก', self.substation_name, 'ั„ะธะด', self.feeder_name))\n\n class Meta:\n unique_together = ('feeder_name', 'substation_name')\n verbose_name = \"ั…ะฐั€ะฐะบั‚ะตั€ะธัั‚ะธะบะฐ ั„ะธะดะตั€ะฐ\"\n verbose_name_plural = \"ั…-ะบะธ ั„ะธะดะตั€ะพะฒ\"\n\n" }, { "alpha_fraction": 0.5538217425346375, "alphanum_fraction": 0.5554085969924927, "avg_line_length": 34.67924499511719, "blob_id": "d44fa239bc03cf0d9f849ebbda4520b123b75b3f", "content_id": "fdded811d4e776d4f2955c1d10b78ca98e4c7393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3781, "license_type": "no_license", "max_line_length": 96, "num_lines": 106, "path": "/Tula_Networks/Tula_Networks/static/scripts/phone_bar_not_in_phones.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "let phonesFlag = document.querySelector('.phones_flag')\nif(!phonesFlag) {\n let phoneBar = document.querySelector('.phone_bar');\n let templatePhone = document.querySelector('#template-side-phone').content;\n\n function clearStorageAndList() {\n localStorage.removeItem('phonesData');\n storList = [];\n }\n\n let reset = document.querySelector('.reset');\n\n reset.ondblclick = () => {\n clearStorageAndList();\n phoneBar.innerHTML = '';\n removeBtnRemove();\n }\n\n let storList = [];\n if (localStorage.getItem('phonesData')) {\n storList = 
JSON.parse(localStorage.getItem('phonesData'))\n } else {\n storList = [];\n }\n\n function removeBtnRemove() {\n if (storList.length < 2) {\n reset.classList.add('display_none');\n } else {console.log(storList.length)}\n }\n removeBtnRemove()\n\n function addNumberAndHref(obj, whatAdd, field) {\n if (obj[field]) {\n whatAdd.parentElement.parentElement.classList.remove('display_none');\n whatAdd.textContent = obj[field];\n whatAdd.parentElement.href = obj[`${field}Href`];\n }\n }\n\n function btnMarkerOnClick(btn, phone) {\n let phoneNum = phone.textContent;\n btn.addEventListener('click', (evt) => {\n evt.preventDefault();\n let aroundPhone = btn.parentElement;\n aroundPhone.classList.toggle('btn-warning');\n for (let i=0; i<storList.length; i++) {\n if (storList[i]['phone'] == phoneNum) {\n if (aroundPhone.classList.contains('btn-warning')) {\n storList[i]['color'] = 1;\n } else {\n storList[i]['color'] = 0;\n }\n console.log(storList);\n localStorage.setItem('phonesData', JSON.stringify(storList));\n break;\n }\n }\n })\n }\n\n function removeFromStorageOnClick(btn, phone) {\n let phoneNum = phone.textContent;\n btn.addEventListener('click', (evt) => {\n evt.preventDefault();\n for (let i=0; i<storList.length; i++) {\n if (storList[i]['phone'] == phoneNum) {\n storList.splice(i, 1);\n localStorage.setItem('phonesData', JSON.stringify(storList));\n break;\n }\n }\n phoneBar.innerHTML = '';\n addPhonesFromList();\n removeBtnRemove();\n })\n }\n\n\n function addPhonesFromList() {\n\n storList.forEach((item, i) => {\n let onePhone = templatePhone.cloneNode(true);\n let phoneAdd = onePhone.querySelector('.phone_add');\n let personAdd = onePhone.querySelector('.person_add');\n let subscriberAdd = onePhone.querySelector('.subscriber_add');\n let substationAdd = onePhone.querySelector('.substation_add');\n\n phoneAdd.textContent = item['phone'];\n phoneAdd.parentElement.parentElement.href=item['phoneHref'];\n addNumberAndHref(item, personAdd, 'person');\n 
addNumberAndHref(item, subscriberAdd, 'subscriber');\n addNumberAndHref(item, substationAdd, 'substation');\n\n if (item['color']) {\n phoneAdd.parentElement.parentElement.parentElement.classList.add('btn-warning');\n }\n btnRemove = onePhone.querySelector('.btn_remove');\n removeFromStorageOnClick(btnRemove, phoneAdd);\n btnMarkerColor = onePhone.querySelector('.marker_color');\n btnMarkerOnClick(btnMarkerColor, phoneAdd);\n phoneBar.appendChild(onePhone);\n })\n }\n addPhonesFromList();\n }" }, { "alpha_fraction": 0.710273802280426, "alphanum_fraction": 0.7105560302734375, "avg_line_length": 32.09812927246094, "blob_id": "fa7279e8f87c24e13f3ad736118a273f013d6bb7", "content_id": "6c4b5e1145f4662dba5bbe74f8d6c2fe7469c6bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7096, "license_type": "no_license", "max_line_length": 107, "num_lines": 214, "path": "/Tula_Networks/tula_net/admin.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom import_export.admin import ImportExportActionModelAdmin\nfrom import_export import resources, fields\nfrom import_export.widgets import ForeignKeyWidget\n\n# Register your models here.\nfrom .models import Substation, Section, Feeder, Subscriber, Person, Phone, Group, ClassVoltage,\\\n Region, GroupLine, Line, Feeder_characteristic\n\n\nclass GroupResourse(resources.ModelResource):\n class Meta:\n model = Group\n\n\nclass GroupAdmin(ImportExportActionModelAdmin):\n resource_class = GroupResourse\n list_display = ['pk', 'name', 'ours']\n list_display_links = ['name']\n search_fields = ['name']\n list_filter = ['name', ]\n\n\nadmin.site.register(Group, GroupAdmin)\n\n\nclass ClassVoltageAdmin(admin.ModelAdmin):\n list_display = ['pk', 'class_voltage']\n list_display_links = ['pk', 'class_voltage']\n\n\nadmin.site.register(ClassVoltage, ClassVoltageAdmin)\n\n\nclass RegionResourse(resources.ModelResource):\n 
class Meta:\n model = Region\n\n\nclass RegionAdmin(ImportExportActionModelAdmin):\n resource_class = RegionResourse\n list_display = ['pk', 'name']\n list_display_links = ['pk', 'name']\n\n\nadmin.site.register(Region, RegionAdmin)\n\n\nclass GroupLineAdmin(admin.ModelAdmin):\n list_display = ['pk', 'name', 'ours']\n list_display_links = ['name']\n search_fields = ['name']\n list_filter = ['name', ]\n\n\nadmin.site.register(GroupLine, GroupLineAdmin)\n\n\nclass SubstationResourse(resources.ModelResource):\n voltage_h = fields.Field(attribute='voltage_h', widget=ForeignKeyWidget(ClassVoltage, 'class_voltage'))\n voltage_m = fields.Field(attribute='voltage_m', widget=ForeignKeyWidget(ClassVoltage, 'class_voltage'))\n voltage_l = fields.Field(attribute='voltage_l', widget=ForeignKeyWidget(ClassVoltage, 'class_voltage'))\n owner = fields.Field(attribute='owner', widget=ForeignKeyWidget(Subscriber, 'name'))\n group = fields.Field(attribute='group', widget=ForeignKeyWidget(Group, 'name'))\n region = fields.Field(attribute='region', widget=ForeignKeyWidget(Region, 'name'))\n\n class Meta:\n model = Substation\n export_order = ('id', 'number', 'name', 'voltage_h', 'voltage_m', 'voltage_l',\n 'alien', 'owner', 'group', 'location', 'region', 'description',)\n\n\nclass SubstationAdmin(ImportExportActionModelAdmin):\n resource_class = SubstationResourse\n list_display = ['pk', 'number', 'name', 'voltage_h', 'voltage_m', 'voltage_l', 'group', 'alien']\n list_display_links = ['number', 'name']\n search_fields = ['number', 'name']\n list_filter = ['voltage_h', 'voltage_m', 'voltage_l', 'group', 'alien']\n\n\nadmin.site.register(Substation, SubstationAdmin)\n\n\nclass SectionResourse(resources.ModelResource):\n substation = fields.Field(attribute='substation', widget=ForeignKeyWidget(Substation, 'name'))\n voltage = fields.Field(attribute='voltage', widget=ForeignKeyWidget(ClassVoltage, 'class_voltage'))\n\n class Meta:\n model = Section\n\n\nclass 
SectionAdmin(ImportExportActionModelAdmin):\n resource_class = SectionResourse\n list_display = ['substation', 'name', 'voltage']\n list_display_links = ['name']\n search_fields = ['name']\n list_filter = ['voltage', 'substation']\n\nadmin.site.register(Section, SectionAdmin)\n\n\nclass SubscriberResourse(resources.ModelResource):\n class Meta:\n model = Subscriber\n\n\nclass SubscriberAdmin(ImportExportActionModelAdmin):\n resource_class = SubscriberResourse\n list_display = ['pk', 'short_name', 'name', 'ours', 'year_update']\n list_display_links = ['short_name', 'name']\n search_fields = ['short_name', 'name']\n list_filter = ['ours']\n\n\nadmin.site.register(Subscriber, SubscriberAdmin)\n\n\nclass PersonResourse(resources.ModelResource):\n\n subscriber = fields.Field(attribute='subscriber', widget=ForeignKeyWidget(Subscriber, 'name'))\n\n class Meta:\n model = Person\n\n\nclass PersonAdnin(ImportExportActionModelAdmin):\n resource_class = PersonResourse\n list_display = ['priority', 'name', 'subscriber', 'position']\n list_display_links = ['name']\n search_fields = ['name']\n list_filter = ['subscriber']\n\n\nadmin.site.register(Person, PersonAdnin)\n\n\nclass FeederResourse(resources.ModelResource):\n substation = fields.Field(attribute='substation', widget=ForeignKeyWidget(Substation, 'name'))\n section = fields.Field(attribute='section', widget=ForeignKeyWidget(Section, 'name'))\n subscriber = fields.Field(attribute='subscriber', widget=ForeignKeyWidget(Subscriber, 'name'))\n region = fields.Field(attribute='region', widget=ForeignKeyWidget(Region, 'name'))\n\n class Meta:\n model = Feeder\n\n\nclass FeederAdmin(ImportExportActionModelAdmin):\n resource_class = FeederResourse\n list_display = ['pk', 'name', 'substation', 'section', 'subscriber', 'attention', 'in_reserve']\n list_display_links = ['name']\n search_fields = ['name']\n list_filter = ['substation', 'attention']\n\n\nadmin.site.register(Feeder, FeederAdmin)\n\n\nclass 
Feeder_characteristicResourse(resources.ModelResource):\n feeder = fields.Field(attribute='feeder', widget=ForeignKeyWidget(Feeder, ('name')))\n # ะปะธัˆะฝะตะต ะฟะพะปะต!!!\n substation = fields.Field(attribute='feeder', widget=ForeignKeyWidget(Feeder, ('substation__name')))\n\n class Meta:\n model = Feeder_characteristic\n\n\nclass Feeder_characteristicAdmin(ImportExportActionModelAdmin):\n resource_class = Feeder_characteristicResourse\n list_display = ['feeder', 'length', 'population', 'points', 'checked']\n\n\nadmin.site.register(Feeder_characteristic, Feeder_characteristicAdmin)\n\n\nclass PhoneResourse(resources.ModelResource):\n subscriber = fields.Field(attribute='subscriber', widget=ForeignKeyWidget(Subscriber, 'name'))\n person = fields.Field(attribute='person', widget=ForeignKeyWidget(Person, 'name'))\n substation = fields.Field(attribute='substation', widget=ForeignKeyWidget(Substation, 'name'))\n\n class Meta:\n model = Phone\n\n\nclass PhoneAdmin(ImportExportActionModelAdmin):\n resource_class = PhoneResourse\n list_display = ['number', 'search_number', 'subscriber', 'person', 'substation']\n list_display_links = ['number']\n search_fields = ['number']\n list_filter = ['priority', 'subscriber', ]\n\n\nadmin.site.register(Phone, PhoneAdmin)\n\n\nclass LineResourse(resources.ModelResource):\n voltage = fields.Field(attribute='voltage', widget=ForeignKeyWidget(ClassVoltage, 'class_voltage'))\n management = fields.Field(attribute='management', widget=ForeignKeyWidget(Region, 'name'))\n subscriber = fields.Field(attribute='subscriber', widget=ForeignKeyWidget(Subscriber, 'mame'))\n group = fields.Field(attribute='group', widget=ForeignKeyWidget(GroupLine, 'name'))\n\n class Meta:\n model = Line\n\n\nclass Line1Admin(ImportExportActionModelAdmin):\n resource_class = LineResourse\n list_display = ['pk', 'name', 'short_name', 'voltage', 'kvl', 'subscriber']\n list_display_links = ['name']\n search_fields = ['name', 'short_name', ]\n list_filter = 
['management', 'voltage']\n\n\nadmin.site.register(Line, Line1Admin)\n\n\n\n" }, { "alpha_fraction": 0.6218438744544983, "alphanum_fraction": 0.625450849533081, "avg_line_length": 33.31188201904297, "blob_id": "99b41625d34065cd957893d40e5a8bd3f0392d02", "content_id": "ee1728ea436f2415c036a4080a5c25d2c640f7e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7316, "license_type": "no_license", "max_line_length": 109, "num_lines": 202, "path": "/Tula_Networks/tula_net/utils.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nimport re\n# from .data import context_menu\n\n# ____ ัˆะฐะฑะปะพะฝ ะดะปั ั„ะพั€ะผ ___\nfrom tula_net.models import Substation, Group, Feeder, Section, ClassVoltage, GroupLine, Region, Line\n\n\nclass BaseCrispyForms:\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'ัะพั…ั€ะฐะฝะธั‚ัŒ ะธะทะผะตะฝะตะฝะธั'))\n\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-4'\n self.helper.field_class = 'col-lg-7'\n\n\n# ____ ัˆะฐะฑะปะพะฝ ะดะปั ั„ะพั€ะผ ั‚ะตะปะตั„ะพะฝะพะฒ ___\nclass PhoneFormAddMixin(BaseCrispyForms):\n\n def clean_search_number(self):\n raw_number = self.cleaned_data['number']\n # for i in raw_number:\n # if i.isalpha():\n # raise ValueError('ั…ะผ, ะฐ ัƒ ะ’ะฐั ะฒ ะฝะพะผะตั€ะต ะฑัƒะบะฒั‹, ะฝะฐะฟั€ะธะผะตั€...', i)\n search_number = ''.join([sign for sign in raw_number if sign.isdigit()])\n return search_number\n\n\nclass AddPhoneViewMixin:\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ\"\"\"\n model = None\n form_x = None\n\n def get(self, request, *args, **kwargs):\n form = self.form_x()\n\n 
form.fields[self.model.__name__.lower()].queryset = self.model.objects.filter(pk=self.kwargs['pk'])\n form.fields['search_number'].widget = forms.HiddenInput()\n return render(request, 'tula_net/form_add_phone.html', context={'form': form})\n\n def post(self, request, *args, **kwargs):\n bound_form = self.form_x(request.POST)\n if bound_form.is_valid():\n new_phone = bound_form.save()\n return redirect(new_phone)\n return render(request, 'tula_net/form_add_phone.html', context={'form': bound_form})\n\n\n# _______________ ัƒะดะฐะปะตะฝะธะต _________________\nclass DeleteObjectMixin:\n model = None\n target_reverse = None\n\n def get(self, request, *args, **kwargs):\n obj = self.model.objects.get(pk=self.kwargs['pk'])\n return render(request, 'tula_net/form_delete_object.html', context={'obj': obj})\n\n def post(self, request, *args, **kwargs):\n obj = self.model.objects.get(pk=self.kwargs['pk'])\n obj.delete()\n return redirect(reverse(self.target_reverse))\n\n\n# 345ms overall/46ms on queries/73 queries\n# 72ms overall 5ms on queries 3 queries\nclass SubstationsViewMixin:\n \"\"\" ัˆะฐะฑะปะพะฝ ะดะปั ะŸะก \"\"\"\n context_object_name = 'substations'\n template_name = 'tula_net/substations.html'\n menu = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะบะพะฝั‚ะตั…ั‚ะฝะพะณะพ ะผะตะฝัŽ\n flag = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะดะปั ะพั‚ะพะฑั€ะฐะถะตะฝะธั ะฒั‹ะฑะพั€ะพะบ ะŸะก ะฟะพ ะณั€ัƒะฟะฟะฐะผ ะธ ะฝะฐะฟั€ัะถะตะฝะธัŽ\n\n def get_queryset(self):\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l').all()\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['groups'] = Group.objects.all()\n context['voltages'] = ClassVoltage.objects.all()[1:3]\n context[self.flag] = 1\n return context\n\n\nclass FeedersViewMixin:\n \"\"\" ัˆะฐะฑะปะพะฝ ะดะปั ั„ะธะดะตั€ะพะฒ \"\"\"\n second_model = None # ะผะพะดะตะปัŒ ะฒ ะบะพะฝั‚ะตะบัั‚ ะดะปั ะพั‚ะพะฑั€ะฐะถะตะฝะธั ะบะพะฝะบั€ะตั‚ะฝะพะน ัะตะบั†ะธะธ ะธะธ 
ะŸะก\n the_context = None # ัะผ. ะฟั€ะตะดั‹ะด ะฟัƒะฝะบั‚\n template_name = 'tula_net/feeders.html'\n context_object_name = 'feeders'\n\n def get_queryset(self):\n return Feeder.objects.select_related('substation', 'section', 'subscriber', 'section__voltage').all()\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n if self.second_model:\n context[self.the_context] = self.second_model.objects.get(pk=self.kwargs['pk'])\n return context\n\n\ndef try_number_feeder(x):\n if x.isdigit():\n return int(x)\n return 0\n\n\nclass FeederFormMixin(BaseCrispyForms):\n\n def clean_search_number(self):\n raw_number = self.cleaned_data['name']\n try_number_name = ''.join([sign for sign in raw_number if sign.isdigit()])\n return try_number_name\n\n\n\nclass AddFeederMixin:\n \"\"\" ัˆะฐะฑะปะพะฝ ะดะปั ะดะพะฑะฐะฒะปะตะฝะธั ั„ะธะดะตั€ะฐ c ... \"\"\"\n form_feeder = None\n first_model = None\n first_field = None\n second_field = None\n\n def get(self, request, pk):\n form = self.form_feeder()\n form.fields['try_number_name'].widget = forms.HiddenInput()\n form.fields[self.first_field].queryset = self.first_model.objects.filter(pk=pk)\n if self.second_field and self.second_field == 'section':\n form.fields[self.second_field].queryset = Section.objects.filter(substation__pk=pk,\n voltage__class_voltage__lte=10)\n if self.second_field and self.second_field == 'substation':\n form.fields[self.second_field].queryset = Substation.objects.filter(sections__pk=pk)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, request, pk):\n bound_form = self.form_feeder(request.POST)\n if bound_form.is_valid():\n new_feeder = bound_form.save(commit=False)\n try_num = bound_form.cleaned_data['name']\n new_feeder.try_number_name = try_number_feeder(try_num)\n new_feeder.save()\n return redirect(new_feeder)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': 
bound_form})\n\n\ndef chang_search(obs):\n if '-' in obs:\n list_obs = obs.split('-')\n obs_n = ' - '.join([word.strip() for word in list_obs])\n print(obs_n)\n return obs_n\n return obs\n\n\ndef make_digits(num):\n return ''.join([n for n in num if n.isdigit()])\n\n\ndef try_int(num):\n try:\n x = int(num)\n return x\n except:\n return -1\n\n\nclass Lines1ViewMixin:\n \"\"\" ัˆะฐะฑะปะพะฝ ะดะปั \"\"\"\n context_object_name = 'lines'\n template_name = 'tula_net/lines1.html'\n menu = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะบะพะฝั‚ะตั…ั‚ะฝะพะณะพ ะผะตะฝัŽ\n flag = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะดะปั ะพั‚ะพะฑั€ะฐะถะตะฝะธั ะฒั‹ะฑะพั€ะพะบ ะŸะก ะฟะพ ะณั€ัƒะฟะฟะฐะผ ะธ ะฝะฐะฟั€ัะถะตะฝะธัŽ\n\n def get_queryset(self):\n return Line.objects.select_related('management', 'voltage', 'group')\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n # context['context_menu'] = self.menu\n context['groups'] = GroupLine.objects.all()\n context['voltages'] = ClassVoltage.objects.all()[1:3]\n context['regions'] = Region.objects.filter(for_menu=True)\n context[self.flag] = 1\n return context\n\n\nclass SearchMixin:\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['s'] = f\"s={self.request.GET.get('s')}&\"\n context['flag_search'] = self.request.GET.get('s')\n return context\n" }, { "alpha_fraction": 0.5869410634040833, "alphanum_fraction": 0.5920105576515198, "avg_line_length": 66.55479431152344, "blob_id": "a61b3f52f5d5c5d347e69f90086d5c9ebd1871ef", "content_id": "f0cb896804f3e77b80ab94204f149280cf6618fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20980, "license_type": "no_license", "max_line_length": 206, "num_lines": 292, "path": "/Tula_Networks/tula_net/migrations/0001_initial.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.1 on 2020-12-08 
15:49\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='ClassVoltage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('class_voltage', models.SmallIntegerField(verbose_name='ะšะปะฐัั ะฝะฐะฟั€ัะถะตะฝะธั')),\n ],\n options={\n 'verbose_name': 'ะฝะฐะฟั€ัะถะตะฝะธะต',\n 'verbose_name_plural': 'ะฝะฐะฟั€ัะถะตะฝะธั',\n 'ordering': ['-class_voltage'],\n },\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64, unique=True, verbose_name='ะ“ั€ัƒะฟะฟะฐ')),\n ('location', models.TextField(blank=True, verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('ours', models.BooleanField(blank=True, null=True, verbose_name='ะฝะฐัˆะธ')),\n ],\n options={\n 'verbose_name': 'ะ“ั€ัƒะฟะฟะฐ',\n 'verbose_name_plural': 'ะ“ั€ัƒะฟะฟั‹',\n 'ordering': ['-ours', 'name'],\n },\n ),\n migrations.CreateModel(\n name='GroupLine',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64, unique=True, verbose_name='ะ“ั€ัƒะฟะฟะฐ')),\n ('location', models.TextField(blank=True, verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('ours', models.BooleanField(blank=True, null=True, verbose_name='ะฝะฐัˆะธ')),\n ],\n options={\n 'verbose_name': 'ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›',\n 'verbose_name_plural': 'ะฃั‡ะฐัั‚ะบะธ ัะป ะ’ะ›',\n 'ordering': ['-ours', 'name'],\n },\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, verbose_name='ะคะ˜ะž')),\n ('position', models.CharField(blank=True, max_length=64, null=True, verbose_name='ะดะพะปะถะฝะพัั‚ัŒ')),\n ('priority', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะฟั€ะธะพั€ะธั‚ะตั‚')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ],\n options={\n 'verbose_name': 'ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝะพะต ะปะธั†ะพ',\n 'verbose_name_plural': 'ะžั‚ะฒะตั‚ัั‚ะฒะตะฝะฝั‹ะต ะปะธั†ะฐ',\n 'ordering': ['-priority'],\n },\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, unique=True, verbose_name='ะ ะตะณะธะพะฝ')),\n ('for_menu', models.BooleanField(default=False, verbose_name='ะ”ะพะฑะฐะฒะธั‚ัŒ ะฒ ะผะตะฝัŽ')),\n ],\n options={\n 'verbose_name': 'ะ ะตะณะธะพะฝ',\n 'verbose_name_plural': 'ะ ะตะณะธะพะฝ',\n },\n ),\n migrations.CreateModel(\n name='Res',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=32, unique=True, verbose_name='ะ ะญะก')),\n ('short_name', models.CharField(max_length=16, unique=True, verbose_name='ะ ะญะก ัะพะบั€ะฐั‰ + ัƒั‡ะฐัั‚ะพะบ')),\n ('location', models.TextField(blank=True, verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ],\n options={\n 'verbose_name': 'ะ ะญะก',\n 'verbose_name_plural': 'ะ ะญะกั‹',\n 'ordering': ['name'],\n },\n ),\n migrations.CreateModel(\n name='Section',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('number', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='โ„– ัะตะบั†ะธะธ')),\n ('name', models.CharField(max_length=32, verbose_name='ะะฐะทะฒะฐะฝะธะต 
ัะตะบั†ะธะธ')),\n ('from_T', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะฟะธั‚ะฐะตั‚ัั ะพั‚/ะฟะธั‚ะฐะตั‚ ะข โ„–')),\n ('blind', models.BooleanField(default=False, verbose_name='ะขัƒะฟะธะบะพะฒะฐั')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ],\n options={\n 'verbose_name': 'ะกะตะบั†ะธั',\n 'verbose_name_plural': 'ะกะตะบั†ะธะธ',\n 'ordering': ['voltage__class_voltage', 'name'],\n },\n ),\n migrations.CreateModel(\n name='Subscriber',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, unique=True, verbose_name='ะะฐะทะฒะฐะฝะธะต ะพั€ะณะฐะฝะธะทะฐั†ะธะธ')),\n ('short_name', models.CharField(blank=True, max_length=16, null=True, verbose_name='ะะฐะทะฒ ะพั€ะณะฐะฝ ัะพะบั€ะฐั‰')),\n ('ours', models.BooleanField(verbose_name='ะฝะฐัˆะธ')),\n ('year_update', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะฟะธัะบะธ ะพะฑะฝะพะฒะปะตะฝั‹')),\n ('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('region', models.ForeignKey(blank=True, default=2, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tula_net.region', verbose_name='ัƒั‡ะฐัั‚ะพะบ')),\n ],\n options={\n 'verbose_name': 'ะพั€ะณะฐะฝะธะทะฐั†ะธั',\n 'verbose_name_plural': 'ะพั€ะณะฐะฝะธะทะฐั†ะธะธ',\n 'ordering': ['-ours', 'name'],\n },\n ),\n migrations.CreateModel(\n name='TransmissionLine',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต')),\n ('full_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='ะŸะพะปะฝะพะต ะฝะฐะทะฒะฐะฝะธะต')),\n ('short_name', models.CharField(blank=True, default='', max_length=32, verbose_name='ะฆะธั„ั€ะพะฒะพะต ะฝะฐะทะฒะฐะฝะธะต')),\n ('length', models.FloatField(blank=True, null=True, 
verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ')),\n ('number_columns', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะšะพะปะธั‡ะตัั‚ะฒะพ ะพะฟะพั€')),\n ('description', models.TextField(blank=True, null=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('kvl', models.BooleanField(default=False, verbose_name='ะšะ’ะ›?')),\n ('group', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='lines0', to='tula_net.groupline', verbose_name='ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›')),\n ('maintenance', models.ManyToManyField(blank=True, related_name='lines_ved0', to='tula_net.Region', verbose_name='ะ’ะตะดะตะฝะธะต')),\n ('management', models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='lines_upr0', to='tula_net.region', verbose_name='ะฃะฟั€ะฐะฒะปะตะฝะธะต')),\n ('section', models.ManyToManyField(related_name='lines0', to='tula_net.Section', verbose_name='ะกะตะบั†ะธั')),\n ('subscriber', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lines0', to='tula_net.subscriber', verbose_name='ะฐะฑะพะฝะตะฝั‚')),\n ('voltage', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines0', to='tula_net.classvoltage', verbose_name='ะะฐะฟั€ัะถะตะฝะธะต')),\n ],\n options={\n 'verbose_name': 'ะ›ะธะฝะธั - ะœะžะ”ะ•ะ›ะฌ ะ—ะะœะ•ะะ•ะะ',\n 'verbose_name_plural': 'ะ›ะธะฝะธะธ - ะœะžะ”ะ•ะ›ะฌ ะ—ะะœะ•ะะ•ะะ',\n 'ordering': ['voltage', 'short_name'],\n },\n ),\n migrations.CreateModel(\n name='Substation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('number', models.PositiveSmallIntegerField(unique=True, verbose_name='ะะพะผะตั€ ะŸะก')),\n ('name', models.CharField(max_length=32, unique=True, verbose_name='ะะฐะทะฒะฐะฝะธะต ะŸะก')),\n ('alien', models.BooleanField(verbose_name='ั‡ัƒะถะฐั?')),\n ('location', models.TextField(blank=True, verbose_name='ะ ะฐัะฟะพะปะพะถะตะฝะธะต')),\n 
('description', models.TextField(blank=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='substations', to='tula_net.group', verbose_name='ะ“ั€ัƒะฟะฟะฐ')),\n ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='substations', to='tula_net.subscriber', verbose_name='ะ’ะปะฐะดะตะปะตั†')),\n ('region', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='substations', to='tula_net.region', verbose_name='ะฃั‡ะฐัั‚ะพะบ')),\n ('voltage_h', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ps_volt_h', to='tula_net.classvoltage', verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ะฒั‹ัะพะบะพะต')),\n ('voltage_l', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ps_volt_l', to='tula_net.classvoltage', verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ะฝะธะทะบะพะต')),\n ('voltage_m', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ps_volt_m', to='tula_net.classvoltage', verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต ัั€ะตะดะฝะตะต')),\n ],\n options={\n 'verbose_name': 'ะŸะพะดัั‚ะฐะฝั†ะธั',\n 'verbose_name_plural': 'ะŸะพะดัั‚ะฐะฝั†ะธะธ',\n 'ordering': ['number'],\n },\n ),\n migrations.AddField(\n model_name='section',\n name='substation',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sections', to='tula_net.substation'),\n ),\n migrations.AddField(\n model_name='section',\n name='voltage',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='tula_net.classvoltage', verbose_name='ะฝะฐะฟั€ัะถะตะฝะธะต'),\n ),\n migrations.CreateModel(\n name='Phone',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('number', 
models.CharField(max_length=20, verbose_name='ะฝะพะผะตั€')),\n ('search_number', models.CharField(blank=True, max_length=16, null=True, verbose_name='ะะ• ะ—ะะŸะžะ›ะะฏะขะฌ')),\n ('priority', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะฟั€ะธะพั€ะธั‚ะตั‚')),\n ('description', models.TextField(blank=True, verbose_name='ะพะฟะธัะตะฝะธะต')),\n ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='tula_net.person', verbose_name='ะปะธั†ะพ')),\n ('subscriber', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='tula_net.subscriber', verbose_name='ะพั€ะณะฐะฝะธะทะฐั†ะธั')),\n ('substation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='tula_net.substation', verbose_name='ะŸะก')),\n ],\n options={\n 'verbose_name': 'ะขะตะปะตั„ะพะฝ',\n 'verbose_name_plural': 'ะขะตะปะตั„ะพะฝั‹',\n 'ordering': ['-priority'],\n },\n ),\n migrations.AddField(\n model_name='person',\n name='subscriber',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='persons', to='tula_net.subscriber'),\n ),\n migrations.CreateModel(\n name='Line',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต')),\n ('full_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='ะŸะพะปะฝะพะต ะฝะฐะทะฒะฐะฝะธะต')),\n ('short_name', models.CharField(blank=True, default='', max_length=32, verbose_name='ะฆะธั„ั€ะพะฒะพะต ะฝะฐะทะฒะฐะฝะธะต')),\n ('ps_p1', models.PositiveSmallIntegerField(verbose_name='ะŸะก โ„–1')),\n ('sec_p1', models.PositiveSmallIntegerField(verbose_name='ะกะจ ะŸะก โ„–1')),\n ('ps_p2', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะŸะก โ„–2')),\n ('sec_p2', 
models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะจ ะŸะก โ„–2')),\n ('ps_m1', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะŸะก โ„–3')),\n ('sec_m1', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะจ ะŸะก โ„–3')),\n ('ps_m2', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะŸะก โ„–4')),\n ('sec_m2', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะจ ะŸะก โ„–4')),\n ('ps_m3', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะŸะก โ„–5')),\n ('sec_m3', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะจ ะŸะก โ„–5')),\n ('ps_m4', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะŸะก โ„–6')),\n ('sec_m4', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะจ ะŸะก โ„–6')),\n ('length', models.FloatField(blank=True, null=True, verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ')),\n ('number_columns', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะšะพะปะธั‡ะตัั‚ะฒะพ ะพะฟะพั€')),\n ('description', models.TextField(blank=True, null=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('kvl', models.BooleanField(default=False, verbose_name='ะšะ’ะ›?')),\n ('group', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='tula_net.groupline', verbose_name='ะฃั‡ะฐัั‚ะพะบ ัะป ะ’ะ›')),\n ('maintenance', models.ManyToManyField(blank=True, related_name='lines_ved', to='tula_net.Region', verbose_name='ะ’ะตะดะตะฝะธะต')),\n ('management', models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='lines_upr', to='tula_net.region', verbose_name='ะฃะฟั€ะฐะฒะปะตะฝะธะต')),\n ('subscriber', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lines', to='tula_net.subscriber', verbose_name='ะฐะฑะพะฝะตะฝั‚')),\n ('voltage', 
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines', to='tula_net.classvoltage', verbose_name='ะะฐะฟั€ัะถะตะฝะธะต')),\n ],\n options={\n 'verbose_name': 'ะ›ะธะฝะธั',\n 'verbose_name_plural': 'ะ›ะธะฝะธะธ',\n 'ordering': ['voltage', 'short_name'],\n },\n ),\n migrations.CreateModel(\n name='Feeder',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, verbose_name='ะะฐะทะฒะฐะฝะธะต ั„ะธะดะตั€ะฐ')),\n ('try_number_name', models.SmallIntegerField(blank=True, null=True)),\n ('attention', models.BooleanField(default=False, verbose_name='!!!')),\n ('in_reserve', models.BooleanField(default=False, verbose_name='ะ ะตะทะตั€ะฒะฝั‹ะน')),\n ('description', models.TextField(blank=True, null=True, verbose_name='ะžะฟะธัะตะฝะธะต')),\n ('region', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='feeders', to='tula_net.region', verbose_name='ะฃั‡ะฐัั‚ะพะบ')),\n ('section', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='feeders', to='tula_net.section', verbose_name='ะกะบะจ')),\n ('subscriber', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='feeders', to='tula_net.subscriber', verbose_name='ะฐะฑะพะฝะตะฝั‚')),\n ('substation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='feeders', to='tula_net.substation', verbose_name='ะŸะก')),\n ],\n options={\n 'verbose_name': 'ั„ะธะดะตั€',\n 'verbose_name_plural': 'ั„ะธะดะตั€ะฐ',\n 'ordering': ['in_reserve', 'section', 'try_number_name', 'name'],\n },\n ),\n migrations.CreateModel(\n name='Feeder_characteristic',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('feeder_name', models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต ั„ะธะดะตั€ะฐ')),\n 
('substation_name', models.CharField(max_length=64, verbose_name='ะะฐะทะฒะฐะฝะธะต ะŸะก')),\n ('length', models.FloatField(blank=True, null=True, verbose_name='ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ')),\n ('tp_our_num', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะขะŸ ะฝะฐัˆะธ: ะบะพะปะธั‡ะตัั‚ะฒะพ')),\n ('tp_alien_num', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะขะŸ ั‡ัƒะถะธะต: ะบะพะปะธั‡ะตัั‚ะฒะพ')),\n ('villages_num', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะะŸ ะบะพะปะธั‡ะตัั‚ะฒะพ')),\n ('villages_names', models.TextField(blank=True, null=True, verbose_name='ะะŸ ะฝะฐะทะฒะฐะฝะธั')),\n ('power_winter', models.FloatField(blank=True, null=True, verbose_name='ะ—ะธะผะฐ ะœะ’ั‚')),\n ('power_summer', models.FloatField(blank=True, null=True, verbose_name='ะ›ะตั‚ะพ ะœะ’ั‚')),\n ('population', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะะฐัะตะปะตะฝะธะต')),\n ('points', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะขะพั‡ะบะธ ะฟะพัั‚ะฐะฒะบะธ')),\n ('social_num', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะพั†ะธะฐะปะบะฐ ะบะพะป-ะฒะพ')),\n ('social_names', models.TextField(blank=True, null=True, verbose_name='ะกะพั†ะธะฐะปะบะฐ ั‚ะธะฟ')),\n ('checked', models.BooleanField(default=False, verbose_name='ัะพะพั‚ะฒะตั‚ัั‚ะฒัƒะตั‚')),\n ('feeder', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='character', to='tula_net.feeder', verbose_name='ั„ะธะดะตั€')),\n ],\n options={\n 'verbose_name': 'ั…ะฐั€ะฐะบั‚ะตั€ะธัั‚ะธะบะฐ ั„ะธะดะตั€ะฐ',\n 'verbose_name_plural': 'ั…-ะบะธ ั„ะธะดะตั€ะพะฒ',\n 'unique_together': {('feeder_name', 'substation_name')},\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6789129376411438, "alphanum_fraction": 0.6829391121864319, "avg_line_length": 33.27586364746094, "blob_id": "1e079a22076de3fd133f1ad91a0485683266e9bf", "content_id": 
"ab207591744503664be8017cb08e8d2036c10ad2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1987, "license_type": "no_license", "max_line_length": 74, "num_lines": 58, "path": "/Tula_Networks/Tula_Networks/static/scripts/ph3.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "let btnAdds = document.querySelectorAll('.btn_add');\nlet btnRemoves = document.querySelectorAll('.btn_remove');\nlet phoneBar = document.querySelector('.phone_bar');\nlet pb = JSON.stringify(phoneBarFunc(phoneBar));\n//let listPhones = 'xz';\n\n\nfunction copyPhone(btn) {\n let bigParent = btn.parentElement.parentElement.parentElement;\n let dublParent = bigParent.cloneNode(true);\n dublParent.classList.remove('col-6', 'col-md-6', 'col-lg-3', 'mb-2');\n dublParent.classList.add('col-12', 'mb-2', 'phone_in_bar');\n let aRemove = dublParent.querySelector('.btn_remove');\n let aAdd = dublParent.querySelector('.btn_add');\n let btnUp = dublParent.querySelector('.btn_up');\n let btnDown = dublParent.querySelector('.btn_down');\n let delWhenAdd = dublParent.querySelector('.del-when-add');\n// btnUp.classList.remove('display_none');\n// btnDown.classList.remove('display_none');\n aRemove.classList.remove('display_none');\n aAdd.classList.add('display_none');\n// delWhenAdd.classList.add('display_none');\n return dublParent;\n}\n\n\nfunction removePhoneFromBar(btn) {\n btn.addEventListener('click', function(evt) {\n evt.preventDefault();\n let removeFromBar = btn.parentElement.parentElement.parentElement;\n removeFromBar.remove();\n })\n}\n\n\nfunction phoneBarFunc(bar) {\n btnAdds.forEach(item => {\n item.addEventListener('click', function(evt) {\n evt.preventDefault();\n let phoneForBar = copyPhone(item);\n let removeBtn = phoneForBar.querySelector('.btn_remove');\n removePhoneFromBar(removeBtn);\n phoneBar.appendChild(phoneForBar);\n\n })\n })\n return phoneBar;\n }\n\n//let pb = 
JSON.stringify(phoneBarFunc(phoneBar));\nconsole.log('removeBtn4');\nlocalStorage.setItem('phoneBar', pb)\n\nlocalStorage.getItem('phoneBar')\n\nlocalStorage.setItem('test', \"phoneBarFunc(phoneBar)\")\nlocalStorage.getItem('test')\nconsole.log(localStorage.getItem('phoneBar'))" }, { "alpha_fraction": 0.5418397784233093, "alphanum_fraction": 0.5418397784233093, "avg_line_length": 30.79245376586914, "blob_id": "1c89efa1a7ca88339bb3dc2c0150c8291b018464", "content_id": "a97975610db02d53475ed6f41aa56aef105cc8d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1705, "license_type": "no_license", "max_line_length": 72, "num_lines": 53, "path": "/Tula_Networks/Tula_Networks/static/scripts/earth3.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "let pointEarth = document.querySelector('.pointEarth')\n\nif (pointEarth) {\n let earthFeeders = document.querySelectorAll('.earthFeeder')\n let colorMen = document.querySelectorAll('.color-man');\n \n // console.log(colorMen);\n \n function classAddRem(elem, add, rem) {\n elem.classList.add(add);\n elem.classList.remove(rem);\n }\n \n pointEarth.onclick = function () {\n if (pointEarth.classList.contains('btn-outline-primary')) {\n classAddRem(pointEarth, 'btn-danger', 'btn-outline-primary')\n pointEarth.textContent = '!ะ˜ะฉะ•ะœ ะ—ะ•ะœะ›ะฎ!';\n earthFeeders.forEach(item => {\n classAddRem(item, 'btn-danger', 'btn-light')\n })\n } else {\n classAddRem(pointEarth, 'btn-outline-primary', 'btn-danger')\n pointEarth.textContent = 'ะ˜ัะบะฐั‚ัŒ ะทะตะผะปัŽ';\n earthFeeders.forEach(item => {\n classAddRem(item, 'btn-light', 'btn-danger')\n })\n }\n }\n \n earthFeeders.forEach(item => {\n item.onclick = function (event) {\n if (pointEarth.classList.contains('btn-danger')) {\n event.preventDefault();\n if (item.classList.contains('btn-danger')) {\n classAddRem(item, 'btn-success', 'btn-danger')\n } else {\n classAddRem(item, 'btn-danger', 
'btn-success')\n }\n }\n }\n })\n \n \n colorMen.forEach( man => {\n man.addEventListener('click', (evt) => {\n if (pointEarth.classList.contains('btn-danger')) {\n evt.preventDefault();\n man.classList.toggle('bg-warning')\n }\n })\n }\n )\n}\n" }, { "alpha_fraction": 0.6393592953681946, "alphanum_fraction": 0.642633318901062, "avg_line_length": 34.06666564941406, "blob_id": "295898f3a15a3f13ab877dad5eaf8542a1b3c90b", "content_id": "6ae4f526965794c7b025ae6d5d9a3fec296ed3ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29453, "license_type": "no_license", "max_line_length": 119, "num_lines": 810, "path": "/Tula_Networks/tula_net/views.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django.contrib.auth import login\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views import View\nfrom django import forms\nfrom django.db.models import Count, Min, Sum, Avg\n\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView\n\nfrom .forms import FeederAddFromPSForm, FeederFormUpd, PhoneSubscriberFormAdd, PhonePersonFormAdd, PhoneFormUpd, \\\n PhonePSFormAdd, SubscriberFormAdd, PersonFormAdd, SubstationFormUpd, FeederAddFromSubscriberForm, SectionAddForm, \\\n Line1Form, UserAutForm, FeederCharForm\n\nfrom .models import Substation, Subscriber, Section, Person, Phone, Feeder, Group, Region, \\\n ClassVoltage, GroupLine, Line, Feeder_characteristic\nfrom dal import autocomplete\n\nfrom .utils import AddPhoneViewMixin, DeleteObjectMixin, SubstationsViewMixin, FeedersViewMixin, AddFeederMixin, \\\n chang_search, make_digits, Lines1ViewMixin, SearchMixin, try_int, try_number_feeder\n\n\n# from .data import context_menu\n\n\nclass MainView(View):\n \"\"\" ะณะปะฐะฒะฝะฐั \"\"\"\n\n def get(self, request, *args, **kwargs):\n return render(request, 'tula_net/index.html')\n\nclass 
Map(ListView):\n\n context_object_name = 'substations'\n template_name = 'tula_net/map.html'\n\n def get_queryset(self):\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l').exclude(group__pk=4)\n\nclass YMap(ListView):\n\n context_object_name = 'substations'\n template_name = 'tula_net/y-map.html'\n\n def get_queryset(self):\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l').exclude(group__pk=4)\n\n\n\nclass PsListView(SubstationsViewMixin, ListView):\n \"\"\" ะฒัŒัŽั…ะฐ ะดะปั ะฒัะตั… ะŸะก \"\"\"\n paginate_by = 20\n\n\nclass GroupPSView(SubstationsViewMixin, ListView):\n \"\"\" ะฒัŒัŽั…ะฐ ะดะปั ะŸะก ั ั€ะฐะทะฑะธะฒะบะพะน ะฟะพ ะณั€ัƒะฟะฟะต \"\"\"\n flag = 'flag_group'\n\n def get_queryset(self):\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l'). \\\n filter(group__pk=self.kwargs['pk'])\n\n\nclass VoltPSView(SubstationsViewMixin, ListView):\n \"\"\" ะฒัŒัŽั…ะฐ ะดะปั ะŸะก ั ั€ะฐะทะฑะธะฒะบะพะน ะฟะพ ะฝะฐะฟั€ัะถะตะฝะธัŽ \"\"\"\n flag = 'flag_voltages'\n paginate_by = 20\n\n def get_queryset(self):\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l'). \\\n filter(voltage_h__pk=self.kwargs['pk'])\n\n\nclass SubstationsBySubscriberView(ListView):\n \"\"\" ะŸะก ะฟะพ ะฟะพ ะฐะฑะพะฝะตะฝั‚ะฐะผ ัะพ ัะฟะธัะบะพะผ ั„ะธะดะตั€ะพะฒ \"\"\"\n\n context_object_name = 'substations'\n template_name = 'tula_net/substations_by_ss.html'\n\n def get_queryset(self):\n return Substation.objects.filter(feeders__subscriber__pk=self.kwargs['pk']). 
\\\n prefetch_related('feeders', 'feeders__subscriber', 'feeders__section__voltage')\n\n \"\"\" context['the_subscriber'] - ั‚ะพั‚ ะฐะฑะพะฝะตะฝั‚ ะดะปั ะบะพั‚ะพั€ะพะณะพ ะฒั‹ะฒะพะดัั‚ัั ะŸะก ะธ ั„ะธะดะตั€ะฐ \"\"\"\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['the_subscriber'] = Subscriber.objects.get(pk=self.kwargs['pk'])\n return context\n\n\nclass ResPS(ListView):\n pass\n\n\nclass OnePSView(DetailView):\n \"\"\" ะบะฐั€ั‚ะพั‡ะบะฐ ะพะดะฝะพะน ะฟั \"\"\"\n\n template_name = 'tula_net/one_ps.html'\n context_object_name = 'ps'\n\n def get_queryset(self):\n return Substation.objects.annotate(\n tp_ours_sum=Sum('feeders__character__tp_our_num'),\n tp_alien_sum=Sum('feeders__character__tp_alien_num'),\n length_sum=Sum('feeders__character__length'),\n villages_sum=Sum('feeders__character__villages_num'),\n power_winter_sum=Sum('feeders__character__power_winter'),\n power_summer_sum=Sum('feeders__character__power_summer'),\n population_sum=Sum('feeders__character__population'),\n points_sum=Sum('feeders__character__points'),\n social_sum=Sum('feeders__character__social_num'),\n )\\\n .select_related('group', 'voltage_h', 'voltage_m', 'voltage_l'). 
\\\n prefetch_related('sections', 'feeders', 'sections__voltage', 'phones', 'sections__feeders')\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['lines'] = Line.objects.filter(\n Q(ps_p1=self.object.number) |\n Q(ps_p2=self.object.number) |\n Q(ps_m1=self.object.number) |\n Q(ps_m2=self.object.number) |\n Q(ps_m3=self.object.number) |\n Q(ps_m4=self.object.number)\n ).select_related('voltage')\n return context\n\n\nclass SectionListView(ListView):\n \"\"\" ะฒัะต ัะตะบั†ะธะธ ั ั„ะธะดะตั€ะฐะผะธ - ะฑะตัะฟะพะปะตะทะฝะฐั) \"\"\"\n # model = Section\n template_name = 'tula_net/section.html'\n context_object_name = 'sections'\n\n def get_queryset(self):\n return Section.objects.prefetch_related('feeders').select_related('voltage', 'substation').all()\n\n\n# ____________ั„ะธะดะตั€ะฐ_____________\nclass FeedersView(ListView):\n \"\"\"ะดะปั ะฟะพะธัะบ ั„ะธะดะตั€ะพะฒ\"\"\"\n context_object_name = 'feeders'\n template_name = 'tula_net/feeder_search.html'\n paginate_by = 28\n\n def get_queryset(self):\n return Feeder.objects.select_related('substation', 'section', 'subscriber', 'section__voltage')\n\n\nclass OneFeederView(DetailView):\n template_name = 'tula_net/feeder.html'\n context_object_name = 'feeder'\n\n def get_queryset(self):\n return Feeder.objects.select_related('subscriber', 'section', 'substation', 'character'). \\\n prefetch_related('subscriber__phones', 'subscriber__persons__phones')\n\n\nclass AllFeedersView(FeedersViewMixin, ListView):\n \"\"\" ะฒัะต ั„ะธะดะตั€ะฐ \"\"\"\n paginate_by = 10\n\n\nclass OneSubstationView(FeedersViewMixin, ListView):\n \"\"\" ะพะดะฝะฐ ะŸะก - ะปะธัั‚ ั„ะธะดะตั€ะพะฒ \"\"\"\n second_model = Substation\n the_context = 'the_substation'\n\n def get_queryset(self):\n return Feeder.objects.select_related('substation', 'section', 'subscriber', 'section__voltage', 'character'). 
\\\n filter(substation__pk=self.kwargs['pk'])\n\n\n#__________ ั…ะฐั€ะบะธ ั„ะธะดะตั€ะพะฒ__________\nclass CharsView(ListView):\n context_object_name = 'chars'\n template_name = 'tula_net/chars.html'\n\n def get_queryset(self):\n return Feeder_characteristic.objects.select_related('feeder')\n\n\nclass OneCharsView(DetailView):\n \"\"\" ะบะฐั€ั‚ะพั‡ะบะฐ ะพะดะฝะพะน ั…-ะบะธ \"\"\"\n template_name = 'tula_net/one_char.html'\n context_object_name = 'character'\n\n def get_queryset(self):\n return Feeder_characteristic.objects.select_related('feeder')\n\n\n# __________ ัะตะบั†ะธะธ _____________\nclass OneSectionView(FeedersViewMixin, ListView):\n \"\"\" ะพะดะฝะฐ ัะตะบั†ะธั - ะปะธัั‚ ั„ะธะดะตั€ะพะฒ \"\"\"\n second_model = Section\n the_context = 'the_section'\n\n def get_queryset(self):\n return Feeder.objects.select_related('substation', 'section', 'subscriber', 'section__voltage', 'character'). \\\n filter(section__pk=self.kwargs['pk'])\n\n\nclass Section1View(DetailView):\n \"\"\" ะพะดะฝะฐ ัะตะบั†ะธั - ะพะฑัŠะตะบั‚ \"\"\"\n context_object_name = 'section'\n template_name = 'tula_net/one_section.html'\n\n def get_queryset(self):\n return Section.objects.annotate(\n tp_ours_sum=Sum('feeders__character__tp_our_num'),\n tp_alien_sum=Sum('feeders__character__tp_alien_num'),\n length_sum=Sum('feeders__character__length'),\n villages_sum=Sum('feeders__character__villages_num'),\n power_winter_sum=Sum('feeders__character__power_winter'),\n power_summer_sum=Sum('feeders__character__power_summer'),\n population_sum=Sum('feeders__character__population'),\n points_sum=Sum('feeders__character__points'),\n social_sum=Sum('feeders__character__social_num'),\n ).prefetch_related('feeders', 'voltage', 'substation')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n \"\"\"ะปะธะฝะธะธ ะฟะพ ัะตะบั†ะธัะผ - ะผะพะถะฝะพ ะฟั€ะพั‰ะต, ะฝะพ 400 ะบะฒะตั€ะธั ะฟั€ะธ ะดะพะฑะฐะฒะปะตะฝะธะธ ะพะฑัŠะตะบั‚ะฐ ะ’ะ›........\"\"\"\n context['lines'] = 
Line.objects.filter(\n Q(ps_p1=self.object.substation.number, sec_p1=self.object.number, voltage=self.object.voltage) |\n Q(ps_p2=self.object.substation.number, sec_p2=self.object.number, voltage=self.object.voltage) |\n Q(ps_m1=self.object.substation.number, sec_m1=self.object.number, voltage=self.object.voltage) |\n Q(ps_m2=self.object.substation.number, sec_m2=self.object.number, voltage=self.object.voltage) |\n Q(ps_m3=self.object.substation.number, sec_m3=self.object.number, voltage=self.object.voltage) |\n Q(ps_m4=self.object.substation.number, sec_m4=self.object.number, voltage=self.object.voltage)\n )\n return context\n\n\n# _________ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ _____________\nclass OneSubscriberView(DetailView):\n template_name = 'tula_net/one_subscriber.html'\n context_object_name = 'subscriber'\n\n def get_queryset(self):\n return Subscriber.objects.prefetch_related('phones', 'persons', 'persons__phones', 'substations', 'lines')\n\n\nclass SubscriberListView(ListView):\n model = Subscriber\n context_object_name = 'subscribers'\n template_name = 'tula_net/subscribers_all.html'\n paginate_by = 50\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass SubscribersBySectionView(ListView):\n context_object_name = 'subscribers'\n template_name = 'tula_net/subscribers.html'\n\n def get_queryset(self):\n return Subscriber.objects.prefetch_related(\n 'phones', 'persons', 'persons__phones', 'feeders',\n 'feeders__substation', 'feeders__section', 'feeders__section__voltage'\n ).filter(feeders__section__pk=self.kwargs['pk'])\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['the_section'] = Section.objects.select_related('substation').get(pk=self.kwargs['pk'])\n return context\n\n\nclass SubscribersByPSView(ListView):\n \"\"\" ะพะดะฝะฐ ะŸะก ัะพ ัะฟะธัะบะพะผ ะฒัะตั… ะฐะฑะพะฝะตะฝั‚ะพะฒ +\n ะฒัะต ะฐะฑะพะฝะตะฝั‚ั‹ ัะพ 
ัะฟะธัะบะพะผ ะฒัะตั… ั„ะธะดะตั€ะพะฒ ะฟะพ ะญะขะžะ™ ะŸะก \"\"\"\n context_object_name = 'subscribers'\n template_name = 'tula_net/subscribers.html'\n\n def get_queryset(self):\n return Subscriber.objects.prefetch_related(\n 'phones', 'persons', 'persons__phones', 'feeders',\n 'feeders__substation', 'feeders__section', 'feeders__section__voltage'\n ).filter(feeders__substation__pk=self.kwargs['pk'])\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['the_substation'] = Substation.objects.get(pk=self.kwargs['pk'])\n return context\n\n\n# _________________ ะปัŽะดะธ __________________\nclass PersonListView(ListView):\n \"\"\" ัะฟะธัะพะบ ะฒัะตั… ะปัŽะดะตะน \"\"\"\n template_name = 'tula_net/persons.html'\n context_object_name = 'persons'\n paginate_by = 24\n\n def get_queryset(self):\n return Person.objects.select_related('subscriber')\n\n\nclass OnePersonView(DetailView):\n \"\"\" ะพะดะธะฝ ั‡ะตะปะพะฒะตะบ \"\"\"\n model = Person\n template_name = 'tula_net/one_person.html'\n context_object_name = 'person'\n\n def get_queryset(self):\n return Person.objects.select_related('subscriber').prefetch_related('phones', 'subscriber__phones')\n\n\n# ____________ ั‚ะตะปะตั„ะพะฝั‹ _______________\nclass PhoneListView(ListView):\n template_name = 'tula_net/phones.html'\n context_object_name = 'phones'\n paginate_by = 32\n\n def get_queryset(self):\n return Phone.objects.select_related('subscriber', 'person', 'substation')\n\n\nclass OnePhoneView(DetailView):\n \"\"\" ะพะดะธะฝ ั‚ะตะปะตั„ะพะฝ \"\"\"\n template_name = 'tula_net/one_phone.html'\n context_object_name = 'phone'\n\n def get_queryset(self):\n return Phone.objects.select_related('subscriber', 'person', 'substation'). 
\\\n prefetch_related('person__phones', 'subscriber__phones', 'substation__phones')\n\n\n# __________________ ะŸะพะธัะบะธ ____________________\nclass SearcherSubscribersView(SearchMixin, ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ะฐะฑะพะฝะตะฝั‚ะฐะผ \"\"\"\n context_object_name = 'subscribers'\n template_name = 'tula_net/subscribers_all.html'\n paginate_by = 20\n\n def get_queryset(self):\n return Subscriber.objects.filter(\n Q(name__icontains=self.request.GET.get('s')) |\n Q(short_name__icontains=self.request.GET.get('s'))\n )\n\n\nclass SearcherPSView(SubstationsViewMixin, ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ะฟะพะดัั‚ะฐะฝั†ะธัะผ \"\"\"\n paginate_by = 20\n\n def get_queryset(self):\n name = self.request.GET.get('s')\n return Substation.objects.select_related('group', 'voltage_h', 'voltage_m', 'voltage_l').filter(\n Q(name__icontains=name) |\n Q(number=try_int(name))\n )\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['s'] = f\"s={self.request.GET.get('s')}&\"\n context['flag_search'] = self.request.GET.get('s')\n return context\n\n\nclass SearcherPersonsView(SearchMixin, ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ะปัŽะดัะผ \"\"\"\n context_object_name = 'persons'\n template_name = 'tula_net/persons.html'\n paginate_by = 18\n\n def get_queryset(self):\n return Person.objects.select_related('subscriber').filter(name__icontains=self.request.GET.get('s'))\n\n\nclass SearcherPhonesView(SearchMixin, ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ั‚ะตะปะตั„ะพะฝะฐะผ \"\"\"\n context_object_name = 'phones'\n template_name = 'tula_net/phones.html'\n paginate_by = 20\n\n def get_queryset(self):\n digits = make_digits(self.request.GET.get('f'))\n return Phone.objects.select_related('subscriber', 'substation', 'person').filter(\n Q(number__icontains=self.request.GET.get('f')) |\n Q(search_number__icontains=digits)\n )\n \n def get_context_data(self, *, object_list=None, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context['s'] = f\"f={self.request.GET.get('f')}&\"\n context['flag_search'] = self.request.GET.get('f')\n return context\n\n\nclass SearcherPhonesToNamesView(SearchMixin, ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ั‚ะตะปะตั„ะพะฝะฐะผ \"\"\"\n context_object_name = 'phones'\n template_name = 'tula_net/phones.html'\n paginate_by = 20\n\n def get_queryset(self):\n name = self.request.GET.get('s')\n return Phone.objects.select_related('subscriber', 'substation', 'person').filter(\n Q(subscriber__short_name__icontains=name) |\n Q(subscriber__name__icontains=name) |\n Q(substation__name__icontains=name) |\n Q(substation__number=try_int(name)) |\n Q(person__name__icontains=name)\n )\n\nclass SearcherLinesView(ListView):\n \"\"\" ะŸะพะธัะบ ะฟะพ ะปะธะฝะธัะผ \"\"\"\n context_object_name = 'lines'\n template_name = 'tula_net/lines1.html'\n\n def get_queryset(self):\n obj_serch = self.request.GET.get('s')\n obj_serch_n = chang_search(obj_serch)\n return Line.objects.select_related('management', 'voltage', 'group').filter(\n Q(full_name__icontains=obj_serch) |\n Q(name__icontains=obj_serch) |\n Q(short_name__icontains=obj_serch) |\n Q(short_name__icontains=obj_serch_n)\n )\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['s'] = f\"s={self.request.GET.get('s')}&\"\n context['flag_search'] = self.request.GET.get('s')\n context['groups'] = GroupLine.objects.all()\n context['voltages'] = ClassVoltage.objects.all()[1:3]\n context['regions'] = Region.objects.filter(for_menu=True)\n return context\n\n\nclass SearcherFeedersView(SearchMixin, ListView):\n context_object_name = 'feeders'\n template_name = 'tula_net/feeder_search.html'\n paginate_by = 20\n\n def get_queryset(self):\n return Feeder.objects.filter(Q(name__icontains=self.request.GET.get('s'))).\\\n select_related('substation', 'subscriber')\n\n\n# ___________________ ะคะžะ ะœะซ ____________________\n## __________________ 
ั„ะธะดะตั€ั‹ ____________________\nclass AddFeederFromPSView(AddFeederMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั ะŸะก \"\"\"\n form_feeder = FeederAddFromPSForm\n first_model = Substation\n first_field = 'substation'\n second_field = 'section'\n\n\nclass AddFeederFromSecView(AddFeederMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั ะกะบะจ \"\"\"\n form_feeder = FeederAddFromPSForm\n first_model = Section\n first_field = 'section'\n second_field = 'substation'\n\n\nclass AddFeederFromSubscriberView(AddFeederMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ะพั‚ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ \"\"\"\n form_feeder = FeederAddFromSubscriberForm\n first_model = Subscriber\n first_field = 'subscriber'\n\n\nclass UpdFeederView(View):\n \"\"\" ะธะทะผะตะฝะตะฝะธะต ั„ะธะดะตั€ะฐ\"\"\"\n\n def get(self, request, pk):\n feeder = Feeder.objects.get(pk=pk)\n form = FeederFormUpd(instance=feeder)\n form.fields['try_number_name'].widget = forms.HiddenInput()\n form.fields['section'].queryset = Section.objects.\\\n filter(substation__feeders__pk=pk, voltage__class_voltage__lt=11)\n form.fields['substation'].queryset = Substation.objects.filter(feeders__pk=pk)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, request, pk):\n feeder = Feeder.objects.get(pk=pk)\n form = FeederFormUpd(request.POST, instance=feeder)\n if form.is_valid():\n feeder = form.save(commit=False)\n try_num = form.cleaned_data['name']\n feeder.try_number_name = try_number_feeder(try_num)\n feeder.save()\n return redirect(feeder)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n\n# _________ั…ะฐั€ะบะธ ั„ะธะดะตั€ะพะฒ_________\nclass AddCharacterFeederView(View):\n\n def get(self, request, pk):\n form = FeederCharForm()\n form.fields['feeder'].initial = Feeder.objects.get(pk=pk)\n form.fields['feeder'].queryset = Feeder.objects.filter(pk=pk)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, 
request, pk):\n bound_form = FeederCharForm(request.POST)\n if bound_form.is_valid():\n charact = bound_form.save()\n return redirect(Feeder.objects.get(pk=pk))\n return render(request, 'tula_net/form_add_person.html', context={'form': bound_form})\n\n\nclass UpdCharacterFeederView(View):\n \"\"\" ะฟั€ะฐะฒะบะฐ ั ั„ะธะดะตั€ะฐ \"\"\"\n def get(self, request, pk):\n charact = Feeder_characteristic.objects.get(feeder__pk=pk)\n form = FeederCharForm(instance=charact)\n form.fields['feeder'].queryset = Feeder.objects.filter(pk=pk)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, request, pk):\n charact = Feeder_characteristic.objects.get(feeder__pk=pk)\n bound_form = FeederCharForm(request.POST, instance=charact)\n if bound_form.is_valid():\n charact = bound_form.save()\n return redirect(Feeder.objects.get(pk=pk))\n return render(request, 'tula_net/form_add_person.html', context={'form': bound_form})\n\n\nclass UpdCharacterNoFeederView(View):\n \"\"\" ะฟั€ะฐะฒะบะฐ ั ะปะธัะบะฐ, ะตะพะณะดะฟ ะบ ั„ะธะดะตั€ัƒ ะฝะต ะฟั€ะธะฒัะทะฐะฝ \"\"\"\n def get(self, request, pk):\n charact = Feeder_characteristic.objects.get(pk=pk)\n form = FeederCharForm(instance=charact)\n form.fields['feeder'].queryset = Feeder.objects.filter(substation__name=charact.substation_name)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, request, pk):\n charact = Feeder_characteristic.objects.get(pk=pk)\n bound_form = FeederCharForm(request.POST, instance=charact)\n if bound_form.is_valid():\n charact = bound_form.save()\n return redirect(Feeder.objects.get(character__pk=pk))\n return render(request, 'tula_net/form_add_person.html', context={'form': bound_form})\n\n\n## __________________ ั‚ะตะปะตั„ะพะฝั‹ ____________________\nclass AddSubscriberPhoneView(AddPhoneViewMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ\"\"\"\n model = Subscriber\n form_x = 
PhoneSubscriberFormAdd\n\n\nclass AddPersonPhoneView(AddPhoneViewMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะปะธั†ะฐ\"\"\"\n model = Person\n form_x = PhonePersonFormAdd\n\n\nclass AddPSPhoneView(AddPhoneViewMixin, View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะŸะก\"\"\"\n model = Substation\n form_x = PhonePSFormAdd\n\n\nclass UpdPhoneView(View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ\"\"\"\n\n def get(self, request, pk):\n phone = Phone.objects.get(pk=pk)\n form = PhoneFormUpd(instance=phone)\n form.fields['search_number'].widget = forms.HiddenInput()\n return render(request, 'tula_net/form_add_phone.html', context={'form': form})\n\n def post(self, request, pk):\n phone = Phone.objects.get(pk=pk)\n bound_form = PhoneFormUpd(request.POST, instance=phone)\n if bound_form.is_valid():\n new_phone = bound_form.save()\n return redirect(new_phone)\n return render(request, 'tula_net/form_add_phone.html', context={'form': bound_form})\n\n\nclass PhoneDeleteView(DeleteObjectMixin, View):\n \"\"\" ัƒะดะฐะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ\"\"\"\n model = Phone\n target_reverse = 'phones'\n\n\nclass FeederDeleteView(DeleteObjectMixin, View):\n \"\"\" ัƒะดะฐะปะตะฝะธะต ั„ะธะดะตั€ะฐ\"\"\"\n model = Feeder\n target_reverse = 'main'\n\n\n# _______________ ะคะพั€ะผั‹ ะžั€ะณะฐะฝะธะทะฐั†ะธะธ _____________\nclass AddSubscriberView(CreateView):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ะพั€ะณะฐะฝะธะทะฐั†ะธะธ\"\"\"\n model = Subscriber\n form_class = SubscriberFormAdd\n template_name = 'tula_net/form_add_subscriber.html'\n\n\nclass UpdSubscriberView(UpdateView):\n \"\"\" ะธะทะผะตะฝะตะฝะธะต ะพั€ะณะฐะฝะธะทะฐั†ะธะธ\"\"\"\n model = Subscriber\n form_class = SubscriberFormAdd\n template_name = 'tula_net/form_add_subscriber.html'\n\n\nclass SubscriberDeleteView(DeleteObjectMixin, View):\n \"\"\" ัƒะดะฐะปะตะฝะธะต ะพั€ะณะฐะฝะธะทะฐั†ะธะธ\"\"\"\n model = Subscriber\n target_reverse = 'subscribers'\n\n\n# _______________ ะคะพั€ะผั‹ ะžั€ะณะฐะฝะธะทะฐั†ะธะธ _____________\nclass 
AddPersonView(View):\n\n def get(self, request, *args, **kwargs):\n form = PersonFormAdd()\n form.fields[\"subscriber\"].queryset = Subscriber.objects.filter(pk=self.kwargs['pk'])\n return render(request, 'tula_net/form_add_person.html', context={'form': form})\n\n def post(self, request, *args, **kwargs):\n bound_form = PersonFormAdd(request.POST)\n if bound_form.is_valid():\n new_person = bound_form.save()\n return redirect(new_person)\n return render(request, 'tula_net/form_add_person.html', context={'form': bound_form})\n\n\nclass UpdPersonView(UpdateView):\n model = Person\n form_class = PersonFormAdd\n template_name = 'tula_net/form_add_person.html'\n\n\nclass DelPersonView(DeleteObjectMixin, View):\n model = Person\n target_reverse = 'persons'\n\n\n# _______________ ะคะพั€ะผั‹ ะŸะพะดัั‚ะฐะฝั†ะธะธ _____________\n\nclass AddSubstationView(CreateView):\n model = Substation\n form_class = SubstationFormUpd\n template_name = 'tula_net/form_add_person.html'\n\n\nclass UpdSubstationView(UpdateView):\n model = Substation\n form_class = SubstationFormUpd\n template_name = 'tula_net/form_add_person.html'\n\n\nclass AddSectionFromPSView(View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั„ะธะดะตั€ะฐ c ะŸะก !!! 
ะธ ะพะฝะพ ั€ะฐะฑะพั‚ะฐะตั‚ !!!\"\"\"\n\n def get(self, request, pk):\n form = SectionAddForm()\n form.fields[\"substation\"].queryset = Substation.objects.filter(pk=pk)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n\n def post(self, request, *args, **kwargs):\n bound_form = SectionAddForm(request.POST)\n if bound_form.is_valid():\n new_feeder = bound_form.save()\n return redirect(new_feeder)\n return render(request, 'tula_net/form_add_feeder.html', context={'form': bound_form})\n\n\nclass UpdSectionView(UpdateView):\n model = Section\n form_class = SectionAddForm\n template_name = 'tula_net/form_add_feeder.html'\n\n\nclass SectionDeleteView(DeleteObjectMixin, View):\n \"\"\" ัƒะดะฐะปะตะฝะธะต ัะตะบั†ะธะธ\"\"\"\n model = Section\n target_reverse = 'main'\n\n\n# ___________________\n\"\"\"\nclass SubscriberAutocompleteView(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n return Subscriber.objects.none()\n qs = Subscriber.objects.all()\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass SubstationAutocompleteView(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n return Substation.objects.none()\n qs = Substation.objects.all()\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\"\"\"\n# ________________________\n\n# class LinesView(LinesViewMixin, ListView):\n# paginate_by = 12\n\n\nclass Lines1View(Lines1ViewMixin, ListView):\n paginate_by = 15\n\n\nclass LinesGroupView(Lines1ViewMixin, ListView):\n flag = 'flag_group'\n paginate_by = 15\n\n def get_queryset(self):\n return Line.objects.select_related('management', 'voltage', 'group'). 
\\\n filter(group__pk=self.kwargs['pk'])\n\n\nclass LinesVoltageView(Lines1ViewMixin, ListView):\n flag = 'flag_voltages'\n paginate_by = 15\n\n def get_queryset(self):\n return Line.objects.select_related('management', 'voltage', 'group'). \\\n filter(voltage__pk=self.kwargs['pk'])\n\n\nclass LinesRegionView(Lines1ViewMixin, ListView):\n flag = 'flag_region'\n paginate_by = 15\n\n def get_queryset(self):\n return Line.objects.select_related('management', 'voltage', 'group'). \\\n filter(management__pk=self.kwargs['pk'])\n\n\nclass OneLine1View(DetailView):\n model = Line\n context_object_name = 'line'\n template_name = 'tula_net/one_line1.html'\n\n def get_queryset(self):\n return Line.objects.select_related('voltage', 'group', 'voltage', 'subscriber', 'management'). \\\n prefetch_related('maintenance')\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['plus'] = Section.objects.select_related('substation'). \\\n filter(Q(substation__number=self.object.ps_p1, number=self.object.sec_p1, voltage=self.object.voltage) |\n Q(substation__number=self.object.ps_p2, number=self.object.sec_p2, voltage=self.object.voltage))\n\n context['minus'] = Section.objects.select_related('substation'). \\\n filter(Q(substation__number=self.object.ps_m1, number=self.object.sec_m1, voltage=self.object.voltage) |\n Q(substation__number=self.object.ps_m2, number=self.object.sec_m2, voltage=self.object.voltage) |\n Q(substation__number=self.object.ps_m3, number=self.object.sec_m3, voltage=self.object.voltage) |\n Q(substation__number=self.object.ps_m4, number=self.object.sec_m4, voltage=self.object.voltage)\n )\n return context\n\n\nclass AddLine1View(View):\n \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั„ะธะดะตั€ะฐ c ะŸะก !!! 
ะธ ะพะฝะพ ั€ะฐะฑะพั‚ะฐะตั‚ !!!\"\"\"\n\n def get(self, request):\n form = Line1Form()\n form.fields['voltage'].queryset = ClassVoltage.objects.filter(pk__gt=2)\n return render(request, 'tula_net/form_add_line.html', context={'form': form})\n\n def post(self, request, *args, **kwargs):\n bound_form = Line1Form(request.POST)\n if bound_form.is_valid():\n new_line = bound_form.save()\n return redirect(new_line)\n return render(request, 'tula_net/form_add_line.html', context={'form': bound_form})\n\n\nclass UpdLineView(UpdateView):\n model = Line\n form_class = Line1Form\n template_name = 'tula_net/form_add_line.html'\n\n\nclass LineDeleteView(DeleteObjectMixin, View):\n \"\"\" ัƒะดะฐะปะตะฝะธะต ะ’ะ›\"\"\"\n model = Line\n target_reverse = 'main'\n\n\n\nclass MyLogin(View):\n\n def get(self, request):\n if request.user.is_authenticated:\n return redirect('main')\n form = UserAutForm()\n return render(request, 'login.html', {'form': form, 'my_login': 'ะ’ั…ะพะด', })\n\n def post(self, request):\n form = UserAutForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n return redirect('main')\n return render(request, 'login.html', context={'form': form, })\n\n" }, { "alpha_fraction": 0.6099982857704163, "alphanum_fraction": 0.6127375364303589, "avg_line_length": 33.56804656982422, "blob_id": "a7d00822e66cba5742117b2b2fee4586dc9b0405", "content_id": "bb2d9a40bcf829a6facefb7a3001ec0a21a4bc69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6126, "license_type": "no_license", "max_line_length": 113, "num_lines": 169, "path": "/code_archive.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "\"\"\"\n# ___________ ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ___________\nclass PhoneSFormAdd(forms.ModelForm):\n ''' ะดะปั ั‚ะพะณะพ ั‡ั‚ะพะฑั‹ ะฟั€ะพะฟะธัะฐั‚ัŒ empty_label=None '''\n\n subscriber = forms.ModelChoiceField(empty_label=None, 
queryset=Subscriber.objects.all())\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'ัะพั…ั€ะฐะฝะธั‚ัŒ'))\n\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-4'\n self.helper.field_class = 'col-lg-7'\n class Meta:\n model = Phone\n fields = ('number', 'mail', 'subscriber', 'substation', 'priority', 'description', 'search_number')\n def clean_search_number(self):\n raw_number = self.cleaned_data['number']\n for i in raw_number:\n if i.isalpha():\n raise ValueError('ั…ะผ, ะฐ ัƒ ะ’ะฐั ะฒ ะฝะพะผะตั€ะต ะฑัƒะบะฒั‹...', i)\n if re.match(r'[A-Za-zะ-ะฏะฐ-ั]', raw_number):\n raise ValueError('ั…ะผ, ะฐ ัƒ ะ’ะฐั ะฒ ะฝะพะผะตั€ะต ะฑัƒะบะฒั‹...')\n search_number = ''.join([sign for sign in raw_number if sign.isdigit()])\n return search_number\n\n\n\n## __________________ ั‚ะตะปะตั„ะพะฝั‹ ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ____________________\n\nclass AddSubscriberPhone1(View):\n\n def get(self, request, *args, **kwargs):\n form = PhoneSubscriberFormAdd()\n form.fields[\"subscriber\"].queryset = Subscriber.objects.filter(pk=self.kwargs['pk'])\n return render(request, 'tula_net/form_add_phone.html', context={'form': form})\n\n def post(self, request, *args, **kwargs):\n bound_form = PhoneSubscriberFormAdd(request.POST)\n if bound_form.is_valid():\n new_phone = bound_form.save()\n return redirect(new_phone)\n return render(request, 'tula_net/form_add_phone.html', context={'form': bound_form})\n\n\n\n\n# _______________ ัƒะดะฐะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ _________________\nclass PhoneDelete(View):\n\n def get(self, request,*args, **kwargs):\n phone = Phone.objects.get(pk=self.kwargs['pk'])\n return render(request, 'tula_net/form_del_phone.html', context={'phone': phone})\n\n def post(self, request, *args, **kwargs):\n phone = Phone.objects.get(pk=self.kwargs['pk'])\n phone.delete()\n return 
redirect(reverse('phones'))\n\n\n\"\"\"\n\n\"\"\"\nclass PsList(ListView):\n\n model = Substation\n context_object_name = 'substations'\n template_name = 'tula_net/substations.html'\n extra_context = title1\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['context_menu'] = context_menu\n context['groups'] = Group.objects.all()\n context['voltages'] = [35, 110, 220]\n return context\n\n\n\"\"\"\n\n\"\"\"\nะ”ะปั ะฟะตั€ะฒะพะน ะผะพะดะตะปะธ ะ’ะ› \n\"\"\"\n# class SectionPSView(ListView):\n# template_name = 'tula_net/section.html'\n# context_object_name = 'sections'\n#\n# def get_queryset(self):\n# return Section.objects.prefetch_related('feeders', 'lines', 'substation').select_related('voltage'). \\\n# filter(substation__pk=self.kwargs['pk'])\n#\n# def get_context_data(self, *, object_list=None, **kwargs):\n# context = super().get_context_data(**kwargs)\n# context['the_substation'] = Substation.objects.get(pk=self.kwargs['pk'])\n# return context\n\n\n# class LineForm(BaseCrispyForms, forms.ModelForm):\n# description = forms.CharField(label='ะžะฟะธัะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 1, }))\n#\n# class Meta:\n# model = TransmissionLine\n# fields = '__all__'\n\n\n# class AddLineView(View):\n# \"\"\" ะดะพะฑะฐะฒะปะตะฝะธะต ั„ะธะดะตั€ะฐ c ะŸะก !!! 
ะธ ะพะฝะพ ั€ะฐะฑะพั‚ะฐะตั‚ !!!\"\"\"\n#\n# def get(self, request, pk):\n# form = LineForm()\n# form.fields['section'].queryset = Section.objects.filter(voltage__pk=pk)\n# form.fields['voltage'].queryset = ClassVoltage.objects.filter(pk=pk)\n# form.fields['voltage'].initial = ClassVoltage.objects.get(pk=pk)\n# return render(request, 'tula_net/form_add_feeder.html', context={'form': form})\n#\n# def post(self, request, *args, **kwargs):\n# bound_form = LineForm(request.POST)\n# if bound_form.is_valid():\n# new_line = bound_form.save()\n# return redirect(new_line)\n# return render(request, 'tula_net/form_add_feeder.html', context={'form': bound_form})\n\n\n# class SectionView(DetailView):\n# context_object_name = 'section'\n# template_name = 'tula_net/one_section.html'\n#\n# def get_queryset(self):\n# return Section.objects.prefetch_related('feeders', 'lines').all()\n\n\n\n\n# class LinesViewMixin:\n# \"\"\" ัˆะฐะฑะปะพะฝ ะดะปั \"\"\"\n# model = TransmissionLine\n# context_object_name = 'lines'\n# template_name = 'tula_net/lines.html'\n# menu = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะบะพะฝั‚ะตั…ั‚ะฝะพะณะพ ะผะตะฝัŽ\n# flag = None # ะดะพะฑะฐะฒะปะตะฝะธะต ะดะปั ะพั‚ะพะฑั€ะฐะถะตะฝะธั ะฒั‹ะฑะพั€ะพะบ ะŸะก ะฟะพ ะณั€ัƒะฟะฟะฐะผ ะธ ะฝะฐะฟั€ัะถะตะฝะธัŽ\n#\n# def get_queryset(self):\n# return TransmissionLine.objects.select_related('management', 'voltage', 'group').all()\n#\n# def get_context_data(self, *, object_list=None, **kwargs):\n# context = super().get_context_data(**kwargs)\n# context['context_menu'] = self.menu\n# context['groups'] = GroupLine.objects.all()\n# context['voltages'] = ClassVoltage.objects.all()[1:3]\n# context['regions'] = Region.objects.filter(for_menu=True)\n# context[self.flag] = 1\n# return context\n\n\n\n\n\n# class OneLineView(DetailView):\n# model = TransmissionLine\n# context_object_name = 'line'\n# template_name = 'tula_net/one_line.html'\n#\n# def get_queryset(self):\n# return TransmissionLine.objects.prefetch_related('section', 'section__substation', 
'maintenance'). \\\n# select_related('management', 'group', 'voltage', 'subscriber')" }, { "alpha_fraction": 0.6820250153541565, "alphanum_fraction": 0.6860068440437317, "avg_line_length": 37.15217208862305, "blob_id": "67d96f734e0a504d1ae83236682477c874662fa1", "content_id": "5ec62fb2c489380bb83ef378da550f6d4d10248c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1882, "license_type": "no_license", "max_line_length": 109, "num_lines": 46, "path": "/Tula_Networks/Tula_Networks/static/scripts/phones_add.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "let btnAdds = document.querySelectorAll('.btn_add');\nlet btnRemoves = document.querySelectorAll('.btn_remove');\nlet phoneBar = document.querySelector('.phone_bar');\n\n\nfunction copyPhone(btn) {\n let bigParent = btn.parentElement.parentElement.parentElement;\n let dublParent = bigParent.cloneNode(true);\n dublParent.classList.remove('col-6', 'col-md-6', 'col-lg-3', 'mb-2');\n dublParent.classList.add('col-12', 'mb-2', 'phone_in_bar');\n let aRemove = dublParent.querySelector('.btn_remove');\n let aAdd = dublParent.querySelector('.btn_add');\n let btnUp = dublParent.querySelector('.btn_up');\n let btnDown = dublParent.querySelector('.btn_down');\n// delWhenAdd = dublParent.querySelector('.del-when-add');\n// delWhenAdd.classList.add('display_none');\n aRemove.classList.remove('display_none');\n// btnUp.classList.remove('display_none');\n// btnDown.classList.remove('display_none');\n aAdd.classList.add('display_none');\n phoneBar.appendChild(dublParent);\n return phoneBar;\n}\n\n// ัƒะดะฐะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะธะท ัะฟะธัะบะฐ ะฟั€ะธ ะบะปะธะบะต ะฝะฐ ะพะดะธะฝ ะธะท btns\nfunction removePhoneFromBar(btns) {\n btns.forEach(some => {\n some.addEventListener('click', function(evt) {\n evt.preventDefault();\n let removeFromBar = some.parentElement.parentElement.parentElement;\n removeFromBar.remove();\n })\n })\n}\n\n\n// 
ะดะพะฑะฐะฒะปะตะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ ะฒ ัะฟะธัะพะบ ัั€ะฐะทัƒ ั removePhoneFromBar - ะฝะฐะปะถะตะฝะธะต ะพะฑั€ะฐะฑะพั‚ั‡ะธะบะฐ ะฝะฐ ะบะฐะถะดัƒัŽ btnsForDel ะบะฝะพะฟะบัƒ\n// ะดะพะฑั‹ะฒะปะตะฝะฝะพะณะพ ั‚ะตะปะตั„ะพะฝะฐ\nbtnAdds.forEach(item => {\n item.addEventListener('click', function(evt) {\n evt.preventDefault();\n copyPhone(item);\n let btnsForDel = phoneBar.querySelectorAll('.btn_remove');\n removePhoneFromBar(btnsForDel);\n })\n})\n\n\n\n" }, { "alpha_fraction": 0.36065220832824707, "alphanum_fraction": 0.4083530008792877, "avg_line_length": 44.24919128417969, "blob_id": "f6b91d2215e8500d7d83a9d4f96c4b92fd6b666e", "content_id": "ef9b25a05dee4db32a79513653dce39d2bfb73de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 14257, "license_type": "no_license", "max_line_length": 180, "num_lines": 309, "path": "/Tula_Networks/tula_net/templates/tula_net/one_line1.html", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n\n\n{% block title %}\n{{ block.super }} | {{ line.name }}\n{% endblock %}\n\n{% block cards1 %}\n\n<div class=\"card mb-3\">\n\n <h4 class=\"card-header mb-0 pb-0 text-center\">\n {% if line.short_name %} <b>{{ line.short_name }}</b> | {% endif %}\n {{ line.name }} <br>\n <hr class=\"my-0\"\n {% if line.voltage.class_voltage == 220 %}\n color=\"blue\"\n {% elif line.voltage.class_voltage == 110 %}\n color=\"yellow\"\n {% elif line.voltage.class_voltage == 35 %}\n color=\"red\"\n {% endif %}\n >\n {% if line.full_name %} {{ line.full_name }} <br> {% endif %}\n </h4>\n <div class=\"card-body col-12 col-lg-8 mx-auto px-5\">\n <h5 class=\"text-center\">ะŸะธั‚ะฐัŽั‰ะธะต / ั‚ั€ะฐะฝะทะธั‚ะฝั‹ะต ะŸะก, ะกะบะจ:</h5>\n <div class=\"col-12 col-sm-12 col-md-10 row mx-auto\">\n {% for pl in plus %}\n <a href=\"{{ pl.substation.get_absolute_url }}\" class=\"btn btn-outline-dark col-6 my-1\">\n <b>{{ pl.substation }}</b>\n </a>\n <a href=\"{{ 
pl.get_absolute_url }}\" class=\"btn btn-dark col-6 my-1\">\n <b> {{ pl.name }}</b>\n </a><br>\n {% endfor %}\n</div>\n\n <h5 class=\"text-center mt-2\">ะขัƒะฟะธะบะพะฒั‹ะต ะŸะก, ะกะบะจ:</h5>\n<div class=\"col-12 col-sm-12 col-md-10 row mx-auto\">\n {% for min in minus %}\n <a href=\"{{ min.substation.get_absolute_url }}\" class=\"btn btn-outline-dark col-6 my-1\">\n <b>{{ min.substation }}</b>\n </a>\n <a href=\"{{ min.get_absolute_url }}\" class=\"btn btn-dark col-6 my-1\">\n <b> {{ min.name }}</b>\n </a><br>\n {% empty %}\n ะฃ {{ line }} ะฝะตั‚ ั‚ัƒะฟะธะบะพะฒ\n {% endfor %}\n</div>\n\n <h6><hr class=\"my-2\">\n {% if line.management %}\n ะฃะฟั€ะฐะฒะปะตะฝะธะต : {{ line.management }}\n <hr class=\"my-2\">\n {% endif %}\n\n {% if line.maintenance.count %} ะ’ะตะดะตะฝะธะต :\n {% for m in line.maintenance.all %} {{ m }} , {% endfor %}\n <hr class=\"my-2\">\n {% endif %}\n\n {% if line.group %}\n {{ line.group }} ัƒั‡ะฐัั‚ะพะบ ัะป ะ›ะญะŸ\n <hr class=\"my-2\">\n {% endif %}</h6>\n\n {% if line.subscriber %}\n <a href=\"{{ line.subscriber.get_absolute_url }}\" class=\"btn btn-outline-secondary mx-auto py-0 my-1\"> <b>ะ’ะปะฐะดะตะปะตั†: {{ line.subscriber }}</b></a>\n {% endif %}\n\n</b>\n\n <div class=\"card-footer text-right py-0\">\n <div class=\"btn-group dropup\">\n <button type=\"button\" class=\"btn btn-primary dropdown-toggle dropdown-toggle-split\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\">\n ะฟั€ะฐะฒะธั‚ัŒ ะ‘ะ”<span class=\"sr-only\">Toggle Dropdown</span>\n </button>\n <div class=\"dropdown-menu\">\n <a class=\"dropdown-item\" href=\"{% url 'upd_line' line.pk %}\">ะธะทะผะตะฝะธั‚ัŒ ะ’ะ›</a>\n <a class=\"dropdown-item\" href=\"{% url 'del_line' line.pk %}\">ัƒะดะฐะปะธั‚ัŒ ะ’ะ›</a>\n </div>\n </div>\n </div>\n\n <div class=\"card-footer\">\n {% if line.length %}\n <p>ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ : {{ line.length }}</p>\n {% endif %}\n {% if line.number_columns %}\n <p>ะšะพะปะธั‡ะตัั‚ะฒะพ ะพะฟะพั€ : {{ line.number_columns }}</p>\n {% 
endif %}\n {% if line.description %}\n <p>ะŸั€ะธะผะตั‡ะฐะฝะธะต : {{ line.description|linebreaks }}</p>\n {% endif %}\n\n </div>\n </div>\n\n</div>\n\n {% for pl in plus %}\n {% if pl.substation.alien %}\n\n\n\n\n <div class=\"card mb-3 border border-dark\">\n\n <a href=\"{{ subscriber.get_absolute_url }}\" class=\"btn btn-dark p-1 m-3\" >\n <p class=\"m-1\">{{ pl.substation.owner }}</p>\n </a>\n\n <div class=\"card-body p-1 text-center\"> ะขะตะปะตั„ะพะฝั‹ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ:\n {% for phone in pl.substation.owner.phones.all %}\n <a href=\"{{ phone.get_absolute_url }}\" class=\"btn btn-light px-1 py-0 mx-1 border-secondary\" title=\"{{phone.description}}\">{{ phone }}</a>,\n {% endfor %}\n </div>\n\n\n <div class=\"container\">\n <div class=\"panel-group \">\n <div class=\"panel panel-default \">\n <div class=\"panel-heading\">\n <h4 class=\"panel-title text-center\">\n <a data-toggle=\"collapse\" class=\"btn btn-outline-dark btn-lg btn-block py-0\" href=\"#{{ pl.substation.owner.pk }}\">\n\n {% if pl.substation.owner.year_update > 1 %}\n <svg width=\"1em\" height=\"1em\" viewBox=\"0 0 16 16\" class=\"bi bi-chevron-double-down\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M1.646 6.646a.5.5 0 0 1 .708 0L8 12.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n <path fill-rule=\"evenodd\" d=\"M1.646 2.646a.5.5 0 0 1 .708 0L8 8.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n </svg>\n {% endif %}\n\n\t\t\t\t\t\t{% if pl.substation.owner.year_update == 1 %}\n\t\t\t\t\t\t <span class='text-danger'>ัะฟะธัะบะฐ ะปะธั† ะฝะตั‚ ัะพะฒัะตะผ</span>\n\t\t\t\t\t\t{% else %}\n ัะฟะธัะพะบ ะปะธั†\n {% if pl.substation.owner.year_update %}\n ะฝะฐ {{ pl.substation.owner.year_update }} ะณะพะด\n {% else %}\n <span class=\"text-warning\">ะณะพะด ะฝะต ัƒะบะฐะทะฐะฝ</span>\n {% endif %}\n \t{% endif %}\n\t\t\t\t\t {% if pl.substation.owner.year_update > 1 %}\n 
<svg width=\"1em\" height=\"1em\" viewBox=\"0 0 16 16\" class=\"bi bi-chevron-double-down\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M1.646 6.646a.5.5 0 0 1 .708 0L8 12.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n <path fill-rule=\"evenodd\" d=\"M1.646 2.646a.5.5 0 0 1 .708 0L8 8.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n </svg>\n\t\t\t\t\t {% endif %}\n </a>\n </h4>\n </div>\n\n <div id=\"{{ pl.substation.owner.pk }}\" class=\"panel-collapse collapse\">\n <ul class=\"list-group\">\n <li class=\"list-group-item list-group-item-light\">\n <div class=\"row mb-0\">\n {% for person in pl.substation.owner.persons.all %}\n {% if person.priority == 1 %}\n <div class=\" col-6 col-md-6 col-lg-4 px-1 my-2\">\n <a href=\"{{ person.get_absolute_url }}\" class=\"btn btn-light py-0 mx-auto d-block\" >\n <b>{{ person }}</b>\n <br>\n {% if person.position %} <span class='text-secondary'> {{person.position}} </span>{% endif %}\n </a>\n <div class=\"text-center\">\n {% for phone in person.phones.all %}\n <b>{{ phone }}</b>,\n {% endfor %}\n </div>\n </div>\n {% endif %}\n {% endfor %}\n </div>\n </li>\n\n <li class=\"list-group-item list-group-item-secondary py-0\">\n <p class=\"px-1 py-0 my-0\">\n {% for person in pl.substation.owner.persons.all %}\n {% if not person.priority or person.priority > 1 %}\n {{ person }} {% if person.position %} ({{person.position}}) {% endif %}\n\n {% for phone in person.phones.all %}\n {{ phone }},\n {% endfor %};\n\n {% endif %}\n {% endfor %}\n </p>\n </li>\n </ul>\n </div>\n </div>\n </div>\n </div>\n</div>\n {% endif %}\n {% endfor %}\n\n\n\n {% for pl in minus %}\n {% if pl.substation.alien %}\n\n\n\n\n <div class=\"card mb-3 border border-dark\">\n\n <a href=\"{{ subscriber.get_absolute_url }}\" class=\"btn btn-dark p-1 m-3\" >\n <p class=\"m-1\">{{ pl.substation.owner }}</p>\n </a>\n <div 
class=\"card-body p-1 text-center\"> ะขะตะปะตั„ะพะฝั‹ ะพั€ะณะฐะฝะธะทะฐั†ะธะธ:\n {% for phone in pl.substation.owner.phones.all %}\n <a href=\"{{ phone.get_absolute_url }}\" class=\"btn btn-light px-1 py-0 mx-1 border-secondary\" title=\"{{phone.description}}\">{{ phone }}</a>,\n {% endfor %}\n </div>\n\n\n <div class=\"container\">\n <div class=\"panel-group \">\n <div class=\"panel panel-default \">\n <div class=\"panel-heading\">\n <h4 class=\"panel-title text-center\">\n <a data-toggle=\"collapse\" class=\"btn btn-outline-dark btn-lg btn-block py-0\" href=\"#{{ pl.substation.owner.pk }}\">\n\t\t\t\t\t{% if pl.substation.owner.year_update > 1 %}\n <svg width=\"1em\" height=\"1em\" viewBox=\"0 0 16 16\" class=\"bi bi-chevron-double-down\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M1.646 6.646a.5.5 0 0 1 .708 0L8 12.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n <path fill-rule=\"evenodd\" d=\"M1.646 2.646a.5.5 0 0 1 .708 0L8 8.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n </svg>\n\t\t\t\t\t{% endif %}\n\t\t\t\t\t\t{% if pl.substation.owner.year_update == 1 %}\n\t\t\t\t\t\t<span class='text-danger'>ัะฟะธัะบะฐ ะปะธั† ะฝะตั‚ ัะพะฒัะตะผ</span>\n\t\t\t\t\t\t{% else %}\n ัะฟะธัะพะบ ะปะธั† {% if pl.substation.owner.year_update %}\n ะฝะฐ {{ pl.substation.owner.year_update }} ะณะพะด\n {% else %}\n <span class=\"text-warning\">ะณะพะด ะฝะต ัƒะบะฐะทะฐะฝ</span>\n {% endif %}\n \t{% endif %}\n\t\t\t\t\t{% if pl.substation.owner.year_update > 1 %}\n <svg width=\"1em\" height=\"1em\" viewBox=\"0 0 16 16\" class=\"bi bi-chevron-double-down\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M1.646 6.646a.5.5 0 0 1 .708 0L8 12.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n <path fill-rule=\"evenodd\" d=\"M1.646 2.646a.5.5 0 0 1 .708 0L8 
8.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z\"/>\n </svg>\n\t\t\t\t\t{% endif %}\n </a>\n </h4>\n </div>\n\n <div id=\"{{ pl.substation.owner.pk }}\" class=\"panel-collapse collapse\">\n <ul class=\"list-group\">\n\n <li class=\"list-group-item list-group-item-light\">\n <div class=\"row mb-0\">\n {% for person in pl.substation.owner.persons.all %}\n {% if person.priority == 1 %}\n <div class=\" col-6 col-md-6 col-lg-4 px-1 my-2\">\n <a href=\"{{ person.get_absolute_url }}\" class=\"btn btn-light py-0 mx-auto d-block\" >\n <b>{{ person }}</b>\n <br>\n {% if person.position %} <span class='text-secondary'> {{person.position}} </span>{% endif %}\n </a>\n\n <div class=\"text-center\">\n {% for phone in person.phones.all %}\n <b>{{ phone }}</b>,\n {% endfor %}\n </div>\n </div>\n {% endif %}\n {% endfor %}\n </div>\n </li>\n\n <li class=\"list-group-item list-group-item-secondary py-0\">\n <p class=\"px-1 py-0 my-0\">\n {% for person in pl.substation.owner.persons.all %}\n {% if not person.priority or person.priority > 1 %}\n {{ person }} {% if person.position %} ({{person.position}}) {% endif %}\n\n {% for phone in person.phones.all %}\n {{ phone }},\n {% endfor %};\n\n {% endif %}\n {% endfor %}\n\n\n </p>\n </li>\n\n </ul>\n </div>\n </div>\n </div>\n </div>\n </div>\n {% endif %}\n {% endfor %}\n\n\n\n\n\n{% endblock %}\n\n" }, { "alpha_fraction": 0.6332408785820007, "alphanum_fraction": 0.6366758942604065, "avg_line_length": 41.5224723815918, "blob_id": "0de5d821e8957da51a4a8967c10e02f8ba0dd5f3", "content_id": "e29caa7144d1ff750143782cbc2d269a45df8037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8018, "license_type": "no_license", "max_line_length": 113, "num_lines": 178, "path": "/Tula_Networks/tula_net/forms.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.forms import 
AuthenticationForm\nfrom django.forms import TextInput, SelectDateWidget\n\nfrom .models import Feeder, Subscriber, Substation, Section, Phone, Person, Line, Feeder_characteristic\nfrom dal import autocomplete\n\nfrom django.core.exceptions import ValidationError\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nimport re\n\nfrom .utils import PhoneFormAddMixin, BaseCrispyForms\n\n\n#\n# ____________________ ะคะธะดะตั€ั‹ ______________________\n## ____________________ ะดะพะฑะฐะฒะปะตะฝะธะต ะคะธะดะตั€ะฐ ______________________\n\nclass FeederBaseForm(BaseCrispyForms, forms.ModelForm):\n \"\"\" ะฑะฐะทะพะฒะฐั ั„ะพั€ะผะฐ ะดะปั ะดะพะฑะฐะฒะปะตะฝะธั ั„ะธะดะตั€ะฐ \"\"\"\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Feeder\n fields = '__all__'\n\n\nclass FeederAddFromPSForm(FeederBaseForm):\n \"\"\" ะฑะฐะทะพะฒะฐั ั„ะพั€ะผะฐ ะดะปั ะดะพะฑะฐะฒะปะตะฝะธั ั„ะธะดะตั€ะฐ ั ะŸะก \"\"\"\n\n \"\"\" ะดะปั ั‚ะพะณะพ ั‡ั‚ะพะฑั‹ ะฟั€ะพะฟะธัะฐั‚ัŒ empty_label=None \"\"\"\n substation = forms.ModelChoiceField(label='ะŸะก', empty_label=None, queryset=Substation.objects.all())\n section = forms.ModelChoiceField(label='ะกะตะบั†ะธั', empty_label=None, queryset=Section.objects.all())\n\n\nclass FeederAddFromSubscriberForm(FeederBaseForm):\n subscriber = forms.ModelChoiceField(label='ะžั€ะณะฐะฝะธะทะฐั†ะธั', empty_label=None, queryset=Subscriber.objects.all())\n\n\n## ____________________ ะธะทะผะตะฝะตะฝะธะต ะคะธะดะตั€ะฐ ______________________\nclass FeederFormUpd(BaseCrispyForms, forms.ModelForm):\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Feeder\n fields = '__all__'\n # fields = ['name', 'substation', 'section', 'subscriber', 'in_reserve', 'attention', 'description']\n\n\nclass FeederCharForm(BaseCrispyForms, forms.ModelForm):\n\n class Meta:\n 
model = Feeder_characteristic\n fields = '__all__'\n\n\n\n# __________________ ั„ะพั€ะผะฐ ะดะพะฑะฐะฒะปะตะฝะธั ั‚ะตะปะตั„ะพะฝะฐ ะดะปั ะพั€ะณะฐะฝะธะทะฐั†ะธะธ _______________________\nclass PhoneSubscriberFormAdd(PhoneFormAddMixin, forms.ModelForm):\n \"\"\" ะดะปั ั‚ะพะณะพ ั‡ั‚ะพะฑั‹ ะฟั€ะพะฟะธัะฐั‚ัŒ empty_label=None \"\"\"\n subscriber = forms.ModelChoiceField(label='ะžั€ะณะฐะฝะธะทะฐั†ะธั', empty_label=None, queryset=Subscriber.objects.all())\n\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Phone\n fields = ('number', 'subscriber', 'priority', 'description', 'search_number')\n\n\n# __________________ ั„ะพั€ะผะฐ ะดะพะฑะฐะฒะปะตะฝะธั ั‚ะตะปะตั„ะพะฝะฐ ะดะปั ั‡ะตะปะพะฒะตะบะฐ _______________________\nclass PhonePersonFormAdd(PhoneFormAddMixin, forms.ModelForm):\n person = forms.ModelChoiceField(label='ะšั‚ะพ', empty_label=None, queryset=Person.objects.all())\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Phone\n fields = ('number', 'person', 'priority', 'description', 'search_number')\n\n\n# __________________ ั„ะพั€ะผะฐ ะดะพะฑะฐะฒะปะตะฝะธั ั‚ะตะปะตั„ะพะฝะฐ ะดะปั ะŸะก _______________________\nclass PhonePSFormAdd(PhoneFormAddMixin, forms.ModelForm):\n\n substation = forms.ModelChoiceField(label='ะŸะก', empty_label=None, queryset=Substation.objects.all())\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Phone\n fields = ('number', 'substation', 'priority', 'description', 'search_number')\n\n\n# __________________ ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐะฝะธะต ั‚ะตะปะตั„ะพะฝะฐ _______________________\nclass PhoneFormUpd(PhoneFormAddMixin, forms.ModelForm):\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n 
class Meta:\n model = Phone\n fields = ('number', 'person', 'subscriber', 'substation', 'priority', 'description', 'search_number')\n\n\n# ____________________ ะžั€ะณะฐะฝะธะทะฐั†ะธะธ ______________________\nclass SubscriberFormAdd(BaseCrispyForms, forms.ModelForm):\n description = forms.CharField(label='ะžะฟะธัะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Subscriber\n fields = '__all__'\n\n\nclass PersonFormAdd(BaseCrispyForms, forms.ModelForm):\n subscriber = forms.ModelChoiceField(label='ะžั€ะณะฐะฝะธะทะฐั†ะธั', empty_label=None, queryset=Subscriber.objects.all())\n description = forms.CharField(label='ะŸั€ะธะผะตั‡ะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Person\n fields = '__all__'\n\n\nclass SubstationFormUpd(BaseCrispyForms, forms.ModelForm):\n description = forms.CharField(label='ะžะฟะธัะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n location = forms.CharField(label='ะ ะฐัะฟะพะปะพะถะตะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Substation\n fields = '__all__'\n\n\nclass SectionAddForm(BaseCrispyForms, forms.ModelForm):\n substation = forms.ModelChoiceField(label='ะŸะก', empty_label=None, queryset=Substation.objects.all())\n description = forms.CharField(label='ะžะฟะธัะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 3, }))\n\n class Meta:\n model = Section\n fields = '__all__'\n\n\n\n\nclass Line1Form(BaseCrispyForms, forms.ModelForm):\n # description = forms.CharField(label='ะžะฟะธัะฐะฝะธะต', required=False, widget=forms.Textarea(attrs={\"rows\": 1, }))\n class Meta:\n model = Line\n fields = '__all__'\n\n widgets = {\n 'name': forms.TextInput(attrs={'class': 'form-control'}),\n 'full_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'short_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'management': forms.Select(attrs={'class': 
'form-control'}),\n 'maintenance': forms.SelectMultiple(attrs={'class': 'form-control'}),\n 'subscriber': forms.Select(attrs={'class': 'form-control'}),\n 'voltage': forms.Select(attrs={'class': 'form-control'}),\n 'group': forms.Select(attrs={'class': 'form-control'}),\n 'ps_p1': forms.NumberInput(attrs={'class': 'form-control'}),\n 'ps_p2': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_p1': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_p2': forms.NumberInput(attrs={'class': 'form-control'}),\n 'ps_m1': forms.NumberInput(attrs={'class': 'form-control'}),\n 'ps_m2': forms.NumberInput(attrs={'class': 'form-control'}),\n 'ps_m3': forms.NumberInput(attrs={'class': 'form-control'}),\n 'ps_m4': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_m1': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_m2': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_m3': forms.NumberInput(attrs={'class': 'form-control'}),\n 'sec_m4': forms.NumberInput(attrs={'class': 'form-control'}),\n 'length': forms.NumberInput(attrs={'class': 'form-control'}),\n 'number_columns': forms.NumberInput(attrs={'class': 'form-control'}),\n 'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n # 'kvl': forms.BooleanField(),\n }\n\n\nclass UserAutForm(AuthenticationForm):\n username = forms.CharField(label='ะ›ะพะณะธะฝ',\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(label='ะŸะฐั€ะพะปัŒ',\n widget=forms.PasswordInput(attrs={'class': 'form-control'}))\n" }, { "alpha_fraction": 0.6554856896400452, "alphanum_fraction": 0.6687631011009216, "avg_line_length": 38.75, "blob_id": "88dcb294a5e8943d008f51f9a353698dbbdbcac4", "content_id": "97b92b631f672bc850db98047c739645f68f6813", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1732, "license_type": "no_license", "max_line_length": 119, "num_lines": 36, "path": 
"/Tula_Networks/Tula_Networks/static/scripts/maps/read-data.js", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "//ัะพะฑะธั€ะฐะตั‚ JSON ะธะท ะฟะพะณะพ, ั‡ั‚ะพ ะพั‚ั€ะตะฝะดะตั€ะธะด django\nconst jsPsInfoD = document.querySelector('.js-ps-info-dict');\nconst jsonD1 = jsPsInfoD.textContent;\nconst jsonDataProto = \"{\" + jsonD1.slice(0, jsonD1.length-2) + \"}\";\nconst jsonData = JSON.parse(jsonDataProto);\n//---\n\n// ะฟะตั€ะตะฒะพะดะธั‚ ะบะพะพั€ะดะธะฝะฐั‚ั‹ ะฒ ั‚ะฐะบะธะต ะบะพั‚ะพั€ั‹ะต ั‡ะธั‚ะฐะตั‚ leaflet\nconst getCoordinate = (coo) => {\n const directions = ['E', 'W', 'N', 'S']\n\n if (directions.some((dir) => coo.endsWith(dir))) {\n const newCoo = coo.replace(/\\D/g, ' ').trim().split(' ').map((init) => +init);\n return newCoo[0] + newCoo[1]/60 + newCoo[2]/3600;\n }\n return +coo;\n}\n\nconst getTrueCoordinate = (coo) => {\n const [lat, ...rest] = coo.trim().split(' ');\n const lng = rest[rest.length-1]\n return [getCoordinate(lat), getCoordinate(lng)];\n};\n//---\n\n\n// ะทะฐะฑะธั€ะฐะตั‚ ะธะท ั€ะฐะฝะตะต ะฟะพะดะบะปัŽั‡ะตะฝะฝะพะณะพ coordinates.js ัะปะพะฒะฐั€ัŒ ะบะพะพั€ะดะธะฝะฐั‚, ะดะตะปะฐะตั‚ ะผะฐััะธะฒ: [ะฝะพะผะตั€, [ัˆะธั€ะพั‚ะฐ, ะดะพะปะณะพั‚ะฐ]]\n// .ั„ะธะปัŒั€ั€ัƒะตั‚ ะพั‚ ะฟัƒัั‚ั‹ั… ะบะพะพั€ะดะธะฝะฐั‚\n// .ะดะตะปะฐะตั‚ ะผะฐััะธะฒ ะพะฑัŠะตะบั‚ะพะฒ {ะฝะพะผะตั€, ะพั‚ั„ะพั€ะผะฐั‚ะธั€ะพะฒะฐะฝะฝั‹ะต ะบะพั€ะดะธะฝะฐั‚ั‹}\nconst allPoints = Object.entries(coordinates)\n .filter((point) => point[1] !== '')\n .map(point => ({number: +point[0], coordinate: getTrueCoordinate(point[1])}))\n\n// ัะตะดะธะฝัะตั‚ ะทะฝะฐั‡ะตะฝะธั ะฒ 2-ั… ะผะฐัะธะฒะพะฒ (ะธะท ะฟั€ะตะพะฑั€ะฐะทะพะฒะฐะฝะฝะพะณะพ coordinates.js ะธ ะธะท ั€ะฐัะฟะฐั€ัะตะฝะฝะพะณะพ ะดะถะตะนัะพะฝะฐ ะพั‚ั€ะตะฝะดะตั€ะตะฝะฝะพ ะดะถะฐะฝะณะพ)\nconst allBigPoints = allPoints.map(point => ({...point, ...jsonData[point.number]}));\n// console.log(allBigPoints)\n" }, { "alpha_fraction": 0.3778334856033325, "alphanum_fraction": 0.39047515392303467, "avg_line_length": 37.233333587646484, 
"blob_id": "78ec1dab731512a23ad5dcd01126e62d3b160992", "content_id": "9f594e1e5228dfe598aaf244e4ee846ac6d964be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 9523, "license_type": "no_license", "max_line_length": 176, "num_lines": 240, "path": "/Tula_Networks/tula_net/templates/tula_net/one_ps.html", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}\n{{ block.super }} | ะŸะก {{ ps }}\n{% endblock %}\n\n {% block first_section %}\n <div class=\"row mt-5\">\n <div class=\"col-12 col-lg-10 offset-lg-2 m-auto\">\n <div class=\"card mb-4\">\n <div class=\"card-body px-4\">\n <div class=\"row\">\n\n <div class=\"col-12 col-md-3 col-lg-3 col-lx-2 d-flex row m-auto\">\n <a href=\"{% url 'subscriber_ps' ps.pk %}\" type=\"button\" class=\"col-6 col-md-12 btn btn-secondary border border-dark py-2\">\n <b>ะะฑะพะฝะตะฝั‚ั‹ ะฟะพะดั€ะพะฑะฝะพ</b>\n </a>\n <a href=\"{% url 'substation_f' ps.pk %}\" type=\"button\" class=\"col-6 col-md-12 btn btn-secondary border border-dark py-2\">\n <b>ะคะธะดะตั€ั‹ ะฟะพะดั€ะพะฑะฝะพ</b>\n </a>\n </div>\n\n <div class=\"col-12 col-md-9 col-lg-9 col-lx-10\">\n <h4 class=\"bg-light text-center\">\n ะŸะก โ„– {{ ps.number }} {{ ps }} {% include 'incl/_voltage_full.html' %} ะบะ’\n </h4>\n <a href=\"{% url 'group' ps.group.pk %}\" class=\"btn btn-dark m-1\"><h6> <b>ะ“ั€ัƒะฟะฟะฐ:</b> {{ ps.group }}</h6></a>\n {% if ps.owner %}\n <a href=\"{% url 'subscriber' ps.owner.pk %}\" class=\"btn btn-outline-dark m-1\"><h6>ะ’ะปะฐะดะตะปะตั† : {{ ps.owner }}</h6></a>\n {% endif %}\n {% if ps.location %}\n <p> <b>ะ ะฐัะฟะพะปะพะถะตะฝะธะต:</b> {{ ps.location|linebreaks }}</p>\n {% endif %}\n\n <div class=\"card-footer text-right\">\n <div class=\"btn-group dropup\">\n <button type=\"button\" class=\"btn btn-primary dropdown-toggle dropdown-toggle-split\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\">\n 
ะฟั€ะฐะฒะธั‚ัŒ ะ‘ะ”<span class=\"sr-only\">Toggle Dropdown</span>\n </button>\n <div class=\"dropdown-menu\">\n <a class=\"dropdown-item\" href=\"{% url 'add_section' ps.pk %}\">ะดะพะฑะฐะฒะธั‚ัŒ ัะตะบั†ะธัŽ ะฝะฐ ะŸะก</a>\n <a class=\"dropdown-item\" href=\"{% url 'add_feeder_from_ps' ps.pk %}\">ะดะพะฑะฐะฒะธั‚ัŒ ั„ะธะดะตั€ ะฝะฐ ะŸะก</a>\n <a class=\"dropdown-item\" href=\"{% url 'add_phone_ps' ps.pk %}\">ะดะพะฑะฐะฒะธั‚ัŒ ั‚ะตะปะตั„ะพะฝ ะฝะฐ ะŸะก</a>\n <a class=\"dropdown-item\" href=\"{% url 'upd_substation' ps.pk %}\">ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ ะŸะก</a>\n </div>\n </div>\n </div>\n\n </div>\n </div>\n </div>\n\n {% if ps.phones.count %}\n <h5 class=\"text-center p-0 m-0\"> ั‚ะตะปะตั„ะพะฝั‹:\n {% for phone in ps.phones.all %}\n <a class=\"btn btn-light p-0\" href=\"{{ phone.get_absolute_url }}\"><b>{{ phone }}</b></a>,\n {% endfor %}\n </h5>\n {% endif %}\n </div>\n\n </div>\n </div>\n\n <div class=\"card\">\n\n <div class=\"card-header pt-2 pb-0\">\n <b>ะกะฟะธัะพะบ ัะตะบั†ะธะน:</b>\n </div>\n\n <div class=\"card-body py-1\">\n {% for section in ps.sections.all %}\n\n <a href=\"{{ section.get_absolute_url }}\" type=\"button\"\n {% if section.voltage.class_voltage < 8 %}\n class=\"btn btn-outline-dark text-success px-2 py-0\"\n {% elif section.voltage.class_voltage < 20 %}\n class=\"btn btn-outline-dark text-primary px-2 py-0\"\n {% elif section.voltage.class_voltage < 40 %}\n class=\"btn btn-outline-dark text-danger px-2 py-0\"\n {% elif section.voltage.class_voltage < 120 %}\n class=\"btn btn-outline-dark text-warning px-2 py-0\"\n {% else %}\n class=\"btn btn-outline-dark px-2 py-0\"\n {% endif %}\n >{{ section.name }}\n {% if section.from_T != section.number %}\n (T-{{ section.from_T }})\n {% endif %}\n </a>\n\n {% endfor %}<br>\n\n </div>\n\n <div class=\"card-header pt-2 pb-0\">\n <b>ะกะฟะธัะพะบ ั„ะธะดะตั€ะพะฒ:</b>\n </div>\n\n <div class=\"card-body py-1\">\n {% for section in ps.sections.all %}\n {% if section.voltage.class_voltage < 10 %}\n {% for feeder 
in section.feeders.all %}\n <a href=\"{{ feeder.get_absolute_url }}\" type=\"button\" class=\"{{ feeder_6 }}\" title=\"{{feeder.description}}\">\n {% include 'incl/_feeder_marker.html' %}\n </a>\n {% endfor %}\n {% endif %}\n {% endfor %}\n <br>\n {% for section in ps.sections.all %}\n {% if section.voltage.class_voltage > 8 and section.voltage.class_voltage < 30 %}\n {% for feeder in section.feeders.all %}\n <a href=\"{{ feeder.get_absolute_url }}\" type=\"button\" class=\"{{ feeder_10 }}\">\n {% include 'incl/_feeder_marker.html' %}\n </a>\n {% endfor %}\n {% endif %}\n {% endfor %}\n </div>\n\n <div class=\"card pb-3\">\n\n {% if lines.exists %}\n <div class=\"card-header pt-2 pb-0\">\n <b>ะกะฟะธัะพะบ ะ’ะ›:</b>\n </div>\n <div class=\"card-body py-1\">\n {% for line in lines %}\n {% if line.voltage.class_voltage < 40 %}\n <a href=\"{{ line.get_absolute_url }}\" class=\"{{ line_35 }}\">\n {{ line.name }}\n </a>\n {% endif %}\n {% endfor %}\n <br>\n {% for line in lines %}\n {% if line.voltage.class_voltage < 120 and line.voltage.class_voltage > 40 %}\n <a href=\"{{ line.get_absolute_url }}\" class=\"{{ line_110 }}\">\n {{ line.name }}\n </a>\n {% endif %}\n {% endfor %}\n {% endif %}\n\n </div>\n </div>\n </div>\n\n <div class=\"card mb-4\">\n {% if not ps.alien %}\n <div class=\"card-body px-4\">\n\n\n <h5 class=\"text-center\">ะกัƒะผะผะฐ ะฟะพ ะŸะก - ั€ะฐะฑะพั‚ะฐะตั‚, ะตัะปะธ ะดะฐะฝะฝั‹ะต ะฟะพ ั„ะธะดะตั€ะฐะผ ะทะฐะฑะธั‚ั‹ ะฒะตั€ะฝะพ</h5>\n\n <div class=\"my-3 card-footer text-center\">\n <style type=\"text/css\">\n table {\n border-collapse: collapse;\n }\n table th,\n table td {\n padding: 0 3px;\n }\n table tr {\n border-top: 1px solid #000;\n border-bottom: 1px solid #000;\n }\n\n table tr td:last-child {\n text-align: center;\n }\n table tbody {\n border-top: 1px solid #000;\n border-bottom: 1px solid #000;\n }\n </style>\n <table class=\"m-auto\">\n <thead>\n <tr>\n <th>ั…ะฐั€ะฐะบั‚ะตั€ะธัั‚ะธะบะธ</th>\n <th>ะทะฝะฐั‡ะตะฝะธั</th>\n </tr>\n </thead>\n 
<tbody>\n <tr>\n <td>ะขะŸ ะฝะฐัˆะธ</td>\n <td> ะฝะฐัˆะธ {{ ps.tp_ours_sum }} / <span class=\"text-secondary\">ั‡ัƒะถะธะต {{ ps.tp_alien_sum }}</span></td>\n </tr>\n <tr>\n <td>ะะŸ</td>\n <td>{{ ps.villages_sum }}</td>\n </tr>\n\n <tr>\n <td>ะะฐัะตะปะตะฝะธะต</td>\n <td> {{ ps.population_sum }}</td>\n </tr>\n\n <tr>\n <td>ะขะพั‡ะบะธ ะฟะพัั‚ะฐะฒะบะธ</td>\n <td>{{ ps.points_sum }}</td>\n </tr>\n\n <tr>\n <td>ะŸั€ะพั‚ัะถะตะฝะฝะพัั‚ัŒ</td>\n <td>{{ ps.length_sum|floatformat:3 }} ะบะผ</td>\n </tr>\n\n <tr>\n <td>ะกะพั†ะธะฐะปะบะฐ</td>\n <td>{{ ps.social_sum }} </td>\n\n </tr>\n\n\n\n <tr>\n <td>ะŸะพั‚ั€ะตะฑะปะตะฝะธะต</td>\n <td><span class=\"bg-primary\">ะทะธะผะฐ : {{ ps.power_winter_sum|floatformat:3 }}</span> /\n <span class=\"bg-warning\">ะปะตั‚ะพ: {{ ps.power_summer_sum|floatformat:3 }}</span> ะœะ’ั‚</td>\n </tr>\n </tbody>\n </table>\n </div>\n\n </div>\n {% endif %}\n\n <div class=\"card mb-4\">\n <div class=\"card-body px-4\">\n {% if ps.description|linebreaks %}\n <p><b>ะž ะฟะพะดัั‚ะฐะฝั†ะธะธ:</b> {{ ps.description|linebreaks }} </p>\n {% endif %}\n </div>\n </div>\n </div>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.53287672996521, "alphanum_fraction": 0.5472602844238281, "avg_line_length": 27.096153259277344, "blob_id": "f3e4aa74594fc7d48ecbde64d45d4e4c114db803", "content_id": "5c207e20ae1f576f9a7462a6fd0f46e0bdfd6aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 119, "num_lines": 52, "path": "/Tula_Networks/tula_net/templates/tula_net/section.html", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}\n{{ block.super }} | ะกะตะบั†ะธะธ\n{% endblock %}\n\n{% block cards1 %}\n\n<div class=\"d-flex justify-content-center my-3 p-1\">\n<a class=\"btn btn-outline-secondary\" href=\"{{ the_substation.get_absolute_url }}\">\n<h2 class=\"text-center m-0\">{{ the_substation 
}}</h2>\n</a>\n</div>\n{% for section in sections %}\n\n <div class=\"card mb-2\">\n <a\n {% if section.voltage.class_voltage == 6 %}\n class=\"btn btn-outline-dark text-success\"\n {% elif section.voltage.class_voltage == 10 %}\n class=\"btn btn-outline-dark text-primary\"\n {% elif section.voltage.class_voltage == 35 %}\n class=\"btn btn-outline-dark text-danger\"\n {% elif section.voltage.class_voltage == 110 %}\n class=\"btn btn-outline-dark text-warning\"\n {% endif %}\n\n\n href=\"{{ section.get_absolute_url }}\">\n <h5 class=\"card-header p-1\">{{ section }} </h5>\n </a>\n <div class=\"card-body p-1\">\n\n <p class=\"card-text\"><b class=\"card-title\"></b>\n {% for feeder in section.feeders.all %}\n <a href=\"{{ feeder.get_absolute_url }}\" class=\"btn btn-secondary py-1\">{{ feeder.name }} </a>\n {% endfor %}\n {% for line in lines %}\n <a href=\"{{ line.get_absolute_url }}\" class=\"btn btn-secondary py-1\">{{ line.short_name }} | {{ line.name }} </a>\n {% endfor %}\n </p>\n </div>\n </div>\n\n{% endfor %}\n<br>\n\n\n\n\n\n{% endblock %}" }, { "alpha_fraction": 0.5183333158493042, "alphanum_fraction": 0.5400000214576721, "avg_line_length": 21.259260177612305, "blob_id": "c4484aaecf77c390619213e55cce0dd23315ebc9", "content_id": "9962ba595f2b6e6e58fd9044aab2bf751498d2da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 644, "license_type": "no_license", "max_line_length": 102, "num_lines": 27, "path": "/Tula_Networks/templates/login.html", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}\n{{ block.super }} | ะ’ั…ะพะด\n{% endblock %}\n\n\n{% block in %}\n\n\n<div class=\"row\">\n <div class=\"col-8 col-lg-4 offset-2 offset-lg-4\">\n\n <form method=\"post\">\n <div class=\"text-center mt-5 b-1\">\n <h1 class=\"h3 mb-3 font-weight-normal\">{{ title }}</h1>\n <p class=\"h5 font-weight-light\">ะ”ะปั ะฟั€ะพัะผะพั‚ั€ะฐ 
ั‚ั€ะตะฑัƒะตั‚ัั ะฒั…ะพะด.</p>\n </div>\n {% csrf_token %}\n {{ form.as_p }}\n <input type=\"submit\" class=\"btn btn-primary btn-block mt-4 mb-2\" value=\"ะ’ั…ะพะด / ะ ะตะณะธัั‚ั€ะฐั†ะธั\">\n </form>\n\n </div>\n</div>\n\n{% endblock %}" }, { "alpha_fraction": 0.6904032826423645, "alphanum_fraction": 0.6955078840255737, "avg_line_length": 61.6879997253418, "blob_id": "9354e7e457b08605eb12a309492d1f094da23f2a", "content_id": "52c03fa35b927615f4a78073926be304742acbbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8388, "license_type": "no_license", "max_line_length": 120, "num_lines": 125, "path": "/Tula_Networks/tula_net/urls.py", "repo_name": "gitSergeyhab/Tula_networks", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom django.contrib.auth.views import LogoutView\n\nfrom tula_net.views import MainView, PsListView, GroupPSView, VoltPSView, OnePSView, SectionListView, OneSectionView, \\\n OneSubstationView, SubscriberListView, OneSubscriberView, SubscribersBySectionView, \\\n SubscribersByPSView, SubstationsBySubscriberView, SearcherSubscribersView, SearcherPSView, SearcherPersonsView, \\\n AllFeedersView, OneFeederView, OnePersonView, PersonListView, OnePhoneView, PhoneListView, AddFeederFromPSView, \\\n UpdFeederView, UpdPhoneView, AddPersonPhoneView, \\\n SearcherPhonesView, AddPSPhoneView, AddSubscriberPhoneView, PhoneDeleteView, FeederDeleteView, AddSubscriberView, \\\n UpdSubscriberView, SubscriberDeleteView, AddPersonView, UpdPersonView, DelPersonView, UpdSubstationView, \\\n AddFeederFromSubscriberView, AddSectionFromPSView, UpdSectionView, AddFeederFromSecView, \\\n AddSubstationView, LinesGroupView, LinesVoltageView, LinesRegionView, \\\n UpdLineView, LineDeleteView, SectionDeleteView, SearcherLinesView, Lines1View, OneLine1View, AddLine1View, \\\n Section1View, SearcherFeedersView, FeedersView, MyLogin, AddCharacterFeederView, 
UpdCharacterFeederView, CharsView,\\\n OneCharsView, UpdCharacterNoFeederView, SearcherPhonesToNamesView, Map, YMap\n\nurlpatterns = [\n path('map', Map.as_view(), name='map'),\n path('y-map', YMap.as_view(), name='y-map'),\n\n path('in', MyLogin.as_view(), name='in'),\n path('out', LogoutView.as_view(), name='out'),\n path('', MainView.as_view(), name='main'),\n\n # ะŸะก\n path('substations/', PsListView.as_view(), name='substations'),\n path('substation/<int:pk>/', OnePSView.as_view(), name='substation'),\n path('group/<int:pk>/', GroupPSView.as_view(), name='group'),\n path('voltage/<int:pk>/', VoltPSView.as_view(), name='voltage'),\n # ะพะดะฝะฐ ะŸะก ัะพ ัะฟะธัะบะพะผ ัะตะบั†ะธะน - ัะตะบั†ะธั ัะพ ัะฟะธัะบะพะผ ั„ะธะดะตั€ะพะฒ:\n # path('section_ps/<int:pk>/', SectionPSView.as_view(), name='section_ps'),\n\n # ะกะบะจ\n path('section/<int:pk>/', Section1View.as_view(), name='one_section'),\n path('sections/', SectionListView.as_view(), name='sections'),\n\n # 1 ะปะธัั‚ ะฒัะตั… ั„ะธะดะตั€ะพะฒ, 2 ะบะฐั€ั‚ะพั‡ะบะฐ 1-ะณะพ ั„ะธะดะตั€ะฐ, 3-4 ะฒัะต ั„ะธะดะตั€ะฐ ะฟะพ ัะตะบั†ะธะธ-ะฟะพะดัั‚ะฐะฝะฝั†ะธะธ\n\n path('sfeeders/', FeedersView.as_view(), name='sfeeders'),\n path('feeders/', AllFeedersView.as_view(), name='feeders'),\n path('feeder/<int:pk>/', OneFeederView.as_view(), name='feeder'),\n path('feeders/section/<int:pk>/', OneSectionView.as_view(), name='section'),\n path('feeders/substation/<int:pk>/', OneSubstationView.as_view(), name='substation_f'),\n\n # 1 ะปะธัั‚ ะฒัะตั… ะฐะฑะพะฝะตะฝั‚ะพะฒ, 2 ะบะฐั€ั‚ะพั‡ะบะฐ 1-ะณะพ ะฐะฑะพะฝะตะฝั‚ะฐ, 3-4 ะพะดะฝะฐ ะกะบะจ ะธะปะธ ะŸะก ัะพ ัะฟะธัะบะพะผ ะฐะฑะพะฝะตะฝั‚ะพะฒ ัะพ ัะฟะธัะบะพะผ ั„ะธะดะตั€ะพะฒ\n path('subscribers/', SubscriberListView.as_view(), name='subscribers'),\n path('subscriber/<int:pk>/', OneSubscriberView.as_view(), name='subscriber'),\n path('subscribers/section/<int:pk>/', SubscribersBySectionView.as_view(), name='subscriber_sec'),\n path('subscribers/substation/<int:pk>/', 
SubscribersByPSView.as_view(), name='subscriber_ps'),\n # ะพะดะธะฝ ะฐะฑะพะฝะตะฝั‚ ัะพ ัะฟะธัะบะพะผ ะŸะก ัะพ ัะฟะธัะบะพะผ ั„ะธะดะตั€ะพะฒ\n path('substations/subscriber/<int:pk>/', SubstationsBySubscriberView.as_view(), name='subscriber_ss'),\n\n # 1 ะปะธัั‚ ะฒัะตั… ะปัŽะดะตะน, 2 ะบะฐั€ั‚ะพั‡ะบะฐ 1-ะณะพ ั‡ะตะปะพะฒะตะบะฐ\n path('persons/', PersonListView.as_view(), name='persons'),\n path('person/<int:pk>/', OnePersonView.as_view(), name='person'),\n\n # 1 ะปะธัั‚ ะฒัะตั… ั‚ะตะปะตั„ะพะฝะพะฒ, 2 ะบะฐั€ั‚ะพั‡ะบะฐ 1-ะณะพ ั‚ะตะปะตั„ะพะฝะฐ\n path('phones/', PhoneListView.as_view(), name='phones'),\n path('phone/<int:pk>/', OnePhoneView.as_view(), name='phone'),\n\n # 1 ะปะธัั‚ ะฒัะตั… ะ’ะ›, 2 ะบะฐั€ั‚ะพั‡ะบะฐ 1-ะน ะ’ะ›\n path('lines/', Lines1View.as_view(), name='lines'),\n path('line/<int:pk>/', OneLine1View.as_view(), name='line'),\n path('line_group/<int:pk>/', LinesGroupView.as_view(), name='line_group'),\n path('line_voltage/<int:pk>/', LinesVoltageView.as_view(), name='line_voltage'),\n path('line_region/<int:pk>/', LinesRegionView.as_view(), name='line_region'),\n\n # ะฟะพะธัะบะธ 1. ะฟะพ ะฐะฑะพะฝะตะฝั‚ะฐ(ะฟะพะปะฝะพะต ะธ ัะพะบั€ะฐั‰ะตะฝะพะต ะธะผั), 2. ะฟะพ ะŸะก, 3. ะฟะพ ะปัŽะดัะผ, 4. ั‚ะตะปะตั„ะพะฝะฐะผ\n path('searcher_subscribers/', SearcherSubscribersView.as_view(), name='searcher_subscribers'),\n path('searcher_substations/', SearcherPSView.as_view(), name='searcher_substations'),\n path('searcher_persons/', SearcherPersonsView.as_view(), name='searcher_persons'),\n path('searcher_phones/', SearcherPhonesView.as_view(), name='searcher_phones'),\n path('searcher_phones_alt/', SearcherPhonesToNamesView.as_view(), name='searcher_phones_alt'),\n path('searcher_lines/', SearcherLinesView.as_view(), name='searcher_lines'),\n path('searcher_feeders/', SearcherFeedersView.as_view(), name='searcher_feeders'),\n # ั„ะพั€ะผั‹\n # ั„ะธะดะตั€ะฐ 1.ะดะพะฑะฐะฒะปะตะฝะธะต ั ะŸะก, 2. !... ะพั‚ ะฐะฑะพะฝะตะฝั‚ะฐ! 3.ะพะฑะฝะพะฒะปะตะฝะธะต, 4. 
ัƒะดะฐะปะตะฝะธะต\n path('add_feeder/from_ps_pk/<int:pk>/', AddFeederFromPSView.as_view(), name='add_feeder_from_ps'),\n path('add_feeder/from_ss_pk/<int:pk>/', AddFeederFromSubscriberView.as_view(), name='add_feeder_from_ss'),\n path('add_feeder/from_sec_pk/<int:pk>/', AddFeederFromSecView.as_view(), name='add_feeder_from_sec'),\n path('upd_feeder/<int:pk>/', UpdFeederView.as_view(), name='upd_feeder'),\n path('del_feeder/<int:pk>/', FeederDeleteView.as_view(), name='del_feeder'),\n # ะฐะฑะพะฝะตะฝั‚ั‹\n path('add_subscriber/', AddSubscriberView.as_view(), name='add_subscriber'),\n path('upd_subscriber/<int:pk>/', UpdSubscriberView.as_view(), name='upd_subscriber'),\n path('del_subscriber/<int:pk>', SubscriberDeleteView.as_view(), name='del_subscriber'),\n # ะปะธั†ะฐ\n path('add_person/from_ss_pk/<int:pk>/', AddPersonView.as_view(), name='add_person'),\n path('upd_person/<int:pk>/', UpdPersonView.as_view(), name='upd_person'),\n path('del_person/<int:pk>/', DelPersonView.as_view(), name='del_person'),\n # ั‚ะตะปะตั„ะพะฝั‹ ะดะพะฑะฐะฒะปะตะฝะธะต 1. ะพั‚ ะปะธั†ะฐ, 2. ะพั‚ ะฐะฑะพะฝะตะฝั‚ะฐ, 3. 
ะพั‚ ะŸะก, 4, 5\n path('add_phone/from_person_pk/<int:pk>/', AddPersonPhoneView.as_view(), name='add_phone_p'),\n path('add_phone/from_subscriber_pk/<int:pk>/', AddSubscriberPhoneView.as_view(), name='add_phone'),\n path('add_phone/from_substation_pk/<int:pk>/', AddPSPhoneView.as_view(), name='add_phone_ps'),\n path('upd_phone/<int:pk>/', UpdPhoneView.as_view(), name='upd_phone'),\n path('del_phone/<int:pk>/', PhoneDeleteView.as_view(), name='del_phone'),\n # ะŸะก\n path('add_substation/', AddSubstationView.as_view(), name='add_substation'),\n path('upd_substation/<int:pk>', UpdSubstationView.as_view(), name='upd_substation'),\n # ะกะบะจ\n path('add_section/from_ps_pk/<int:pk>/', AddSectionFromPSView.as_view(), name='add_section'),\n path('upd_section/<int:pk>/', UpdSectionView.as_view(), name='upd_section'),\n path('del_section/<int:pk>/', SectionDeleteView.as_view(), name='del_section'),\n\n path('add_line/', AddLine1View.as_view(), name='add_line'),\n path('upd_line/<int:pk>/', UpdLineView.as_view(), name='upd_line'),\n path('del_line/<int:pk>/', LineDeleteView.as_view(), name='del_line'),\n\n # ั…ะฐั€ะบะธ ั„ะธะดะตั€ะพะฒ\n path('character/<int:pk>', OneCharsView.as_view(), name='feeder_char'),\n # ะปะธัั‚ ั…-ะบ:\n path('characters/', CharsView.as_view(), name='chars'),\n # ะดะพะฑะฐะฒะปะตะฝะธะต ั ั„ะธะดะตั€ะฐ:\n path('add_character/<int:pk>/', AddCharacterFeederView.as_view(), name='add_charact'),\n # ะดะปั ะฟั€ะฐะฒะบะธ ั ะปะธัั‚ะฐ (CharsView):\n path('upd_character_fl/<int:pk>/', UpdCharacterNoFeederView.as_view(), name='upd_charact_fl'),\n # ะดะปั ะฟั€ะฐะฒะบะธ ั ั„ะธะดะตั€ะฐ:\n path('upd_character/<int:pk>/', UpdCharacterFeederView.as_view(), name='upd_charact'),\n\n # !ะฝะต ั€ะฐะฑะพั‚ะฐัŽั‚!\n # path('subscriber_autocomplete/', SubscriberAutocompleteView.as_view(), name='subscriber_autocomplete'),\n # path('substation_autocomplete/', SubstationAutocompleteView.as_view(), name='substation_autocomplete'),\n\n]\n" } ]
22
logc/githubland
https://github.com/logc/githubland
5982d388f8ac47c4de364624079160d7a1f8e40d
6d51cf477adcf625887e7255495413c344d93b06
08afbadc6e328f414a2f9e32f9f1aff8e3d59869
refs/heads/master
2016-08-06T12:22:06.184903
2014-11-28T11:31:36
2014-11-28T11:31:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 16.600000381469727, "blob_id": "88faa30c9ff6a926aaaefb3ccc7c6538d410fbf1", "content_id": "78f95b586f66a79d881926460065c8e44b1078b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 264, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/doc/makefile", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "all:\n\tpandoc -t beamer --template custom.beamer \\\n\t\t-V graphics \\\n\t\t--listings \\\n\t\t-V theme=boxes \\\n\t\t-V colortheme=beaver \\\n\t\t-V fonttheme=professionalfonts \\\n\t\t-H preamble.tex \\\n\t\t-s slides.md -o slides.pdf\n\npresentation:\n\topen slides.pdf\n\nclean:\n\trm slides.pdf\n" }, { "alpha_fraction": 0.6061610579490662, "alphanum_fraction": 0.6145447492599487, "avg_line_length": 35.11971664428711, "blob_id": "42e9e8d5cda93a815c2b0aaa3e74ec40f78b3bb5", "content_id": "a61091b5172fe4fb3dad0a193c8e0fb311d33c3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5129, "license_type": "no_license", "max_line_length": 78, "num_lines": 142, "path": "/maps.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule maps\n\nProduces the maps of Europe where country names have been subsituted by a\nprogramming language. 
This language can be the Xth most preferred language as\ncounted on Github commits originating from that country, or it can be the Xth\nmost preferred language excluding some languages.\n\"\"\"\nimport sys\nimport logging\nfrom itertools import imap, ifilter\n\nimport fiona\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom shapely.geometry import Polygon\n\nimport bigquery\nfrom countries import get_european_country_names\n\n\nSEA_COLOR = 'silver'\nLAND_COLOR = 'white'\nTEXT_COLOR = 'black'\nCOUNTRY_COLOR = 'darkviolet'\n\nplt.rc('text', usetex=True)\n# This line is required by the matplotlib API in order to change the default\n# Tex font\n# pylint: disable=star-args\nplt.rc('font', **{'family': 'serif', 'serif': ['New Century Schoolbook']})\n# pylint: enable=star-args\n\n\ndef unnest(nested_list):\n \"\"\"\n Returns a flat list out of a nested list\n \"\"\"\n depth = lambda nested_list: isinstance(nested_list, list) and max(\n imap(depth, nested_list)) + 1\n current_depth = depth(nested_list)\n while current_depth > 1:\n lengths = list(imap(len, nested_list))\n if all([length == 1 for length in lengths]):\n nested_list = [item for sublist\n in nested_list for item in sublist]\n current_depth -= 1\n continue\n nested_list = nested_list[lengths.index(max(lengths))]\n current_depth -= 1\n return nested_list\n\n\ndef draw_map_with_labels(labels, map_number):\n \"\"\"\n Draws a map once the labels substituting country names are given\n \"\"\"\n min_lon = -20.\n max_lon = 49.\n min_lat = 32.\n max_lat = 60.\n europe = Basemap(\n resolution='l',\n projection='aea',\n lon_0=0,\n lat_0=40,\n llcrnrlat=min_lat,\n urcrnrlat=max_lat,\n llcrnrlon=min_lon,\n urcrnrlon=max_lon,\n lat_ts=(min_lon+max_lon)/2)\n europe.drawcountries(linewidth=0.2, color=COUNTRY_COLOR)\n europe.drawmapboundary(linewidth=0.5, fill_color=SEA_COLOR)\n europe.fillcontinents(color=LAND_COLOR, lake_color=SEA_COLOR)\n europe.drawcoastlines(linewidth=0.2)\n for label in 
labels:\n lon, lat = europe(label[1], label[2])\n plt.text(lon, lat, label[0],\n color=TEXT_COLOR, fontweight='heavy', fontstyle='oblique',\n ha='center', clip_on=True)\n plt.tight_layout()\n logging.info('Saving into file: languages_{}.png'.format(map_number + 1))\n plt.savefig('languages_{}.png'.format(map_number + 1))\n\n\ndef draw_map_for_popularity(project_number, popularity=0, excluded=None):\n \"\"\"\n Draw map showing languanges at a certain position in the popularity list.\n Default position is 0, i.e. the most popular language\n \"\"\"\n boundaries = fiona.open(\n 'borders/world_country_admin_boundary_shapefile_with_fips_codes.shp')\n european = get_european_country_names()\n\n def is_european(rec):\n \"\"\" Returns True if boundary is an european country \"\"\"\n return rec['properties']['CNTRY_NAME'] in european\n\n european_boundaries = ifilter(is_european, boundaries)\n\n labels = []\n for european_boundary in european_boundaries:\n ignored = ['Luxembourg', 'Andorra', 'Liechtenstein', 'Macedonia',\n 'Malta', 'Monaco', 'San Marino', 'Vatican City',\n 'Northern Cyprus']\n try:\n name = european_boundary['properties']['CNTRY_NAME']\n if name in ignored:\n continue\n logging.debug(\"Querying BigQuery about {0}\".format(name))\n language = bigquery.get_most_popular_language(\n project_number, name, popularity)\n if excluded:\n while language in excluded:\n popularity = popularity + 1\n language = bigquery.get_most_popular_language(\n project_number, name, popularity)\n if language == 'JavaScript':\n language = 'JS'\n logging.debug(\n \"#{0} most popular language in {1} is {2}\".format(\n popularity + 1, name, language))\n crds = european_boundary['geometry']['coordinates']\n crds = unnest(crds)\n polygon = Polygon(crds)\n if name == 'Russia':\n label = (language, 37, 55) # Moscow lon, lat\n labels.append(label)\n continue\n # The Polygon x and y members seem to be constructed at runtime\n # pylint: disable=no-member\n label = (language, polygon.centroid.x, 
polygon.centroid.y)\n # pylint: enable=no-member\n logging.debug(label)\n labels.append(label)\n except (KeyboardInterrupt, RuntimeError):\n logging.exception(\"An exception happened:\")\n logging.warning(\n \"Writing output to sample_{}.png\".format(popularity))\n draw_map_with_labels(labels, popularity)\n sys.exit(1)\n draw_map_with_labels(labels, popularity)\n" }, { "alpha_fraction": 0.6829738020896912, "alphanum_fraction": 0.6910631656646729, "avg_line_length": 35.05555725097656, "blob_id": "e7f9ddff8780f5b65820e3ed50ea828011b1ff51", "content_id": "e3eb3a8d99c2208eab96d5239a31f18ace8524e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2596, "license_type": "no_license", "max_line_length": 79, "num_lines": 72, "path": "/bigquery.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule bigquery\n\nQuery the Google BigQuery service about Github public timeline.\nAlso, create the necessary credentials if not already created.\n\"\"\"\nimport logging\n\nimport httplib2\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\nfrom filecache import filecache\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\n\n\nFLOW = flow_from_clientsecrets(\n 'client_secrets.json',\n scope='https://www.googleapis.com/auth/bigquery')\n\n\ndef get_most_popular_language(project_number, country='France', offset=0):\n \"\"\"Returns the most popular language for a country, with an offset, i.e. 
if\n offset is 1, then the second most popular language is returned.\"\"\"\n langs = get_languages_by_popularity(country, project_number)\n return langs[offset - 1]\n\n\ndef get_authorized_http():\n \"\"\"Returns an authorized Http instance, for use in queries to the\n service\"\"\"\n storage = Storage('bigquery_credentials.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n from oauth2client import tools\n # Run oauth2 flow with default arguments.\n credentials = tools.run_flow(\n FLOW, storage, tools.argparser.parse_args([]))\n http = httplib2.Http()\n http = credentials.authorize(http)\n return http\n\n\n@filecache(2 * 30 * 24 * 3600)\ndef get_languages_by_popularity(country, project_number):\n \"\"\"Queries the service about language popularity for a specific country,\n returning all aggregated commit counts grouped by language, in\n descending order of count.\"\"\"\n bigquery_service = build('bigquery', 'v2', http=get_authorized_http())\n query_data = {'query': (\n \"SELECT repository_language AS lang, count(repository_name) AS counts \"\n \"FROM [githubarchive:github.timeline] \"\n \"WHERE actor_attributes_location CONTAINS '{0}' \"\n \"AND repository_language IS NOT NULL \"\n \"GROUP BY lang \"\n \"ORDER BY counts DESC \").format(country)}\n # The Resource instance gets the `jobs` member at runtime\n # pylint: disable=no-member\n query_request = bigquery_service.jobs()\n try:\n query_response = query_request.query(\n projectId=project_number, body=query_data).execute()\n except HttpError as err:\n raise RuntimeError(err)\n languages = []\n for row in query_response['rows']:\n try:\n languages.append(row['f'][0]['v'])\n except KeyError:\n logging.exception(query_response.keys())\n languages.append('No data')\n return languages\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6855345964431763, "avg_line_length": 25.5, "blob_id": "3b2b2e5df8faeef2407c11997655763935cd73cd", "content_id": 
"27e9eedddc00889656dcf0ff3ca20282046c42dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/setup.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"This is the setup module for githubland\"\"\"\nfrom setuptools import setup\n\nsetup(name='githubland',\n version='0.1.0',\n test_suite='nose.collector')\n" }, { "alpha_fraction": 0.621463418006897, "alphanum_fraction": 0.6439024209976196, "avg_line_length": 27.47222137451172, "blob_id": "6b6ec0c3d80ef103145ca32e5f81fe39003524fb", "content_id": "e6b6f053de32519acfdbff9f21ac312d30f12c6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2050, "license_type": "no_license", "max_line_length": 69, "num_lines": 72, "path": "/test_main.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule test_main\n\nTests for the command line interface in the main module\n\"\"\"\nimport sys\nimport argparse\nimport shlex\n\nfrom mock import patch\n# Pylint does not work well with nose.tools\n# pylint: disable=no-name-in-module\nfrom nose.tools import assert_true\n# pylint: enable=no-name-in-module\n\nimport main\n\n\n@patch('main.handle_maps')\ndef test_draw_maps(mock_maps_handler):\n sys.argv = shlex.split(\n 'program maps 1111 --popularity 2 --excluded=JavaScript')\n main.main()\n assert_true(mock_maps_handler.called)\n mock_maps_handler.assert_called_once_with(\n argparse.Namespace(\n excluded=['JavaScript'],\n func=main.handle_maps,\n log_level=21,\n popularity=2,\n project_number='1111'))\n\n\n@patch('main.handle_maps')\ndef test_draw_maps_no_excludes(mock_maps_handler):\n sys.argv = shlex.split('program maps 1111 --popularity 1')\n main.main()\n assert_true(mock_maps_handler.called)\n mock_maps_handler.assert_called_once_with(\n argparse.Namespace(\n excluded=None,\n 
func=main.handle_maps,\n log_level=21,\n popularity=1,\n project_number='1111'))\n\n\n@patch('main.handle_maps')\ndef test_draw_maps_multiple_excludes(mock_maps_handler):\n sys.argv = shlex.split(\n 'program maps 1111 --popularity 2 --excluded JavaScript PHP')\n main.main()\n assert_true(mock_maps_handler.called)\n mock_maps_handler.assert_called_once_with(\n argparse.Namespace(\n excluded=['JavaScript', 'PHP'],\n func=main.handle_maps,\n log_level=21,\n popularity=2,\n project_number='1111'))\n\n\n@patch('main.handle_correlate')\ndef test_correlate(mock_correlate_handler):\n sys.argv = shlex.split('program correlate 1111')\n main.main()\n assert_true(mock_correlate_handler.called)\n mock_correlate_handler.assert_called_once_with(\n argparse.Namespace(\n func=main.handle_correlate,\n log_level=21,\n project_number='1111'))\n" }, { "alpha_fraction": 0.6472945809364319, "alphanum_fraction": 0.6593186259269714, "avg_line_length": 31.54347801208496, "blob_id": "2276f83cedb78556c6ccc996f1cf1cb360122468", "content_id": "99c2036dedab32daa347a641b200b8268631eb64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "no_license", "max_line_length": 83, "num_lines": 46, "path": "/countries.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nAs read in:\nhttp://jcastellssala.wordpress.com/2012/06/19/europe-countries-dbpediasparqlpython/\n\"\"\"\nimport logging\nimport posixpath\nimport re\nfrom urlparse import urlparse\n\nfrom filecache import filecache\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\n\n@filecache(30 * 24 * 3600)\ndef get_european_country_names():\n \"\"\"Get European country names from a remote service that knows about such\n entities as countries and Europe.\"\"\"\n logging.debug(\"Querying DBpedia about Europe\")\n sparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\n sparql.setReturnFormat(JSON)\n\n sparql.setQuery(\"\"\"\n PREFIX dcterms: 
<http://purl.org/dc/terms/>\n PREFIX dbprop: <http://dbpedia.org/property/>\n PREFIX yago: <http://dbpedia.org/class/yago/>\n PREFIX dbpedia-owl: <http://dbpedia.org/ontology/>\n\n SELECT DISTINCT ?place WHERE {\n ?place a yago:EuropeanCountries.\n ?place a dbpedia-owl:Country.\n }\n \"\"\")\n\n results = sparql.query().convert()\n names = []\n for result in results['results']['bindings']:\n path = urlparse(result['place']['value']).path\n path_parts = posixpath.split(path)\n assert len(path_parts) == 2, \"result path malformed\"\n name = path_parts[-1]\n name = re.sub(r\"_\", \" \", name)\n name = re.sub(r\"Republic of \", \"\", name)\n name = re.sub(r\" \\(country\\)\", \"\", name)\n names.append(name)\n logging.warning(\"Found the following countries: {}\".format(sorted(names)))\n return sorted(names)\n" }, { "alpha_fraction": 0.6056118607521057, "alphanum_fraction": 0.62431800365448, "avg_line_length": 29.547618865966797, "blob_id": "1ce6d986944776610bb777170582ae682d056d8c", "content_id": "e723d00078fecd00b95228cd31456520e642d48f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/test_maps.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule test_maps\n\nTests for the `maps` module\n\"\"\"\nfrom collections import OrderedDict\n\nfrom mock import patch, ANY\n# Pylint does not work well with nose.tools\n# pylint: disable=no-name-in-module\nfrom nose.tools import assert_equal, ok_\n# pylint: enable=no-name-in-module\n\nimport maps\n\n\ndef test_choosing_coordinates():\n \"\"\"Test that coordinates are well chosen\"\"\"\n boundary = {'geometry': {'coordinates': [[[\n (0, 0),\n (0, 0)]],\n [[(1, 0),\n (1, 0),\n (1, 0)]]],\n 'type': 'MultiPolygon'},\n 'id': '230',\n 'properties': OrderedDict(\n [(u'FIPS_CNTRY', u'UK'), (u'CNTRY_NAME', u'United Kingdom')]),\n 'type': 'Feature'}\n coords = 
boundary['geometry']['coordinates']\n ok_(len(coords[0][0]) < len(coords[1][0]))\n assert_equal(maps.unnest(coords), coords[1][0])\n\n\n@patch('maps.draw_map_with_labels')\n@patch('bigquery.get_most_popular_language')\ndef test_draw_map_excluding(mock_bigquery, mock_do_draw):\n \"\"\"Test how to draw a map excluding some languages from possible results\"\"\"\n fake_project_number = 22\n mock_bigquery.side_effect = lambda x, y, z: 'Python'\n maps.draw_map_for_popularity(fake_project_number)\n mock_do_draw.assert_called_once_with([('Python', ANY, ANY)] * 39, 0)\n" }, { "alpha_fraction": 0.6348208785057068, "alphanum_fraction": 0.6436203718185425, "avg_line_length": 26.431034088134766, "blob_id": "011f05f1980d04bd6abaf2fdebc8157589a31a2b", "content_id": "8b341121bf889be461efc3ecc40cb90620919a9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/main.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule main\n\nEntry point for this project\n\"\"\"\nimport argparse\nimport logging\nimport time\n\nimport maps\nimport correlations\n\n\ndef handle_maps(args):\n maps.draw_map_for_popularity(\n args.project_number, args.popularity, args.excluded)\n\n\ndef handle_correlate(args):\n correlations.produce_all_figures(args.project_number)\n\n\ndef parse_command_line():\n \"\"\"Parse the command line\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l', '--log-level', metavar='LEVEL',\n action='store', dest='log_level', default=21,\n type=int, choices=xrange(51),\n help='1 DEBUG; 11 INFO; 21 WARNING; 31 ERROR; 41 CRITICAL')\n subparsers = parser.add_subparsers()\n parser_maps = subparsers.add_parser('maps')\n parser_maps.add_argument('project_number')\n parser_maps.add_argument('-p', '--popularity', type=int, default=1)\n parser_maps.add_argument('-e', '--excluded', nargs='+')\n 
parser_maps.set_defaults(func=handle_maps)\n parser_corr = subparsers.add_parser('correlate')\n parser_corr.add_argument('project_number')\n parser_corr.set_defaults(func=handle_correlate)\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Main function\"\"\"\n args = parse_command_line()\n logging.basicConfig(\n format='%(asctime)s %(module)s.%(funcName)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S', level=args.log_level)\n start = time.time()\n try:\n args.func(args)\n finally:\n logging.info(\n 'Done in {} seconds'.format(time.time() - start))\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7350746393203735, "alphanum_fraction": 0.7487562298774719, "avg_line_length": 30.162790298461914, "blob_id": "47c64c04e6ad6791febd3b451a72de7425d55dd1", "content_id": "9f925d3ecccd26f9d16935fc44ada8b447e37c52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4020, "license_type": "no_license", "max_line_length": 116, "num_lines": 129, "path": "/doc/slides.md", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "% Githubland\n% Luis Osa \\scriptsize\\<[email protected]\\>\n% November 2014\n\n# Motivation: No code rule\n\n- \"Data beers\" rules do not allow to show code during talks.\n\n- But **data is the same as code**! \n\n (cf. [Lisp\n homoiconicity](http://en.wikipedia.org/wiki/Homoiconicity#Homoiconicity_in_Lisp),\n [Unix \"rule of\n representation\"](http://en.wikipedia.org/wiki/Unix_philosophy))\n\n# Where is there a lot of code? 
Github\n\n$\\vcenter{\\hbox{\\includegraphics[width=.15\\textwidth, height=.5\\textheight]{img/Git-logo.pdf}}}$\nis the VCS developed by the Linux kernel team\n\n$\\vcenter{\\hbox{\\includegraphics[width=.1\\textwidth, height=.5\\textheight]{img/octocat.png}}}$\n$\\vcenter{\\hbox{\\includegraphics[width=.15\\textwidth, height=.5\\textheight]{img/GitHub_logo_2013.pdf}}}$\nis a Git repository web-based hosting service\n\n# Inspiration: Blatt maps\n\n- U.S. Census Bureau data on second languages in American households [^3]\n\n\\includegraphics[width=.5\\textwidth]{img/usa1.png} \\ \\ \n\\includegraphics[width=.5\\textwidth]{img/usa2.png}\n\n[^3]: http://gizmodo.com/the-most-common-languages-spoken-in-the-u-s-state-by-1575719698\n\n# European (programming) languages\n\n![Most popular languages](img/languages.png)\n\n# European (programming) languages\n\n![The problem is Octopress](img/languages_and_octopress.png)\n\n# European (programming) languages\n\n![Most popular languages excluding JavaScript](img/languages_excluding_JS.png)\n\n# European (programming) languages\n\n![The problem is the web](img/languages_and_web.png)\n\n# European (programming) languages\n\n![Most popular languages excluding JavaScript and PHP](img/languages_excluding_JS_PHP.png)\n\n# Processing Github information\n\n- Github offers a REST API, but it has rate limits\n- [GitHub Archive](http://www.githubarchive.org) publishes all public commits\n in hourly archives\n- [Google BigQuery](https://cloud.google.com/bigquery/) has the Github timeline\n as public data\n\n\\centering{\\includegraphics[width=.5\\textwidth, height=.4\\textheight]{img/bigquery-logo.png}}\n\n# Which countries are there in Europe?\n\n- There may be new countries:\n$\\vcenter{\\hbox{\\includegraphics[width=.25\\textwidth, height=.78\\textheight]{img/new_countries.png}}}$\n\n- There may be less countries:\n$\\vcenter{\\hbox{\\includegraphics[width=.125\\textwidth, height=.78\\textheight]{img/ukraine.png}}}$\n\n- A solution: 
DBpedia and SPARQL\n\nDBpedia has a [SPARQL endpoint](http://dbpedia.org/sparql) to **receive\nqueries**. There are [wrapper\nlibraries](https://pypi.python.org/pypi/SPARQLWrapper) \n\n# No Twitter\n\n\\centering{\\includegraphics[width=.5\\textwidth, height=.4\\textheight]{img/no_twitter.jpg}}\n\n- Quite tired of people categorizing tweets. There are many APIs out there!\n- Do not worry, we are still going to get rich! $\\rightarrow$ using World Bank macroeconomic data [^sherouse]\n\n[^sherouse]: Sherouse, Oliver (2014). Wbdata. Arlington, VA. Available from http://github.com/OliverSherouse/wbdata.\n\n# Google Correlations\n\n![\"Clojure programming destroys jobs\", Del Cacho, Carlos, 2014](img/screenshot.png)\n\n# $corr(GDP, language)$\n\n![Pearson correlation of GDP with language preference [^negative]](img/gdp_corr.png)\n\n[^negative]: Negative values denote a language used in richer countries; a\nlow value in the language precedence means a higher place in the language\npreference list for a country.\n\n# $corr(unemployment, language)$\n\n![Pearson correlation of unemployment with language preference[^positive]](img/unemp_corr.png)\n\n[^positive]: Positive values show preferred languages in countries with low unemployment\n\n# $corr(debt, language)$\n\n![Pearson correlation of total government debt as % of GDP with language preference[^positive2]](img/debt_corr.png)\n\n[^positive2]: Positive values show preferred languages in countries with low debt\n\n# Take away messages\n\n- Data talk about code!\n\n. . . \n\n- SPARQL and other APIs: all data is on your laptop\n\n. . . \n\n- BigQuery and other tools: your laptop controls clusters\n\n. . . \n\n- **All languages are beautiful**\n\n. . . 
\n\n- but do not program in OCaml if you can avoid it\n" }, { "alpha_fraction": 0.6824034452438354, "alphanum_fraction": 0.7038626670837402, "avg_line_length": 32.28571319580078, "blob_id": "2bb4e1f4dc4b1e037e4da568100ccdc9cba48e3a", "content_id": "abb6de77968a06408e26573d8773ac1f95228fe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 233, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/README.rst", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "Data sources\n------------\n\n- Github Search - https://api.github.com/search\n- Github Users - https://api.github.com/users/\n- DBpedia SPARQL endpoint - http://dbpedia.org/sparql\n- Boundaries - http://geocommons.com/overlays/33578.html\n" }, { "alpha_fraction": 0.6297380924224854, "alphanum_fraction": 0.6393866539001465, "avg_line_length": 33.141178131103516, "blob_id": "4f2538670596ba62f659de72408cf45700e30a5e", "content_id": "a005ee074e6b7e40e420b3b94cd31a1fd23fd2b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5804, "license_type": "no_license", "max_line_length": 79, "num_lines": 170, "path": "/correlations.py", "repo_name": "logc/githubland", "src_encoding": "UTF-8", "text": "\"\"\"\nModule correlations\n\nProduce the correlation graphs between language preference and country\nmacroeconomic quantities.\n\"\"\"\nimport datetime\n\nimport wbdata\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nimport bigquery\nfrom countries import get_european_country_names\n\n\nDATA_DATE = (datetime.datetime(2013, 1, 1), datetime.datetime(2014, 1, 1))\nDATA_DATE2 = (datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31))\n\n# This line is required by matplotlib to change the default Tex font\n# pylint: disable=star-args\nrc('font', **{'family': 'serif', 'serif': ['Palatino']})\n# pylint: 
enable=star-args\nrc('text', usetex=True)\n\n\ndef get_european_countries():\n \"\"\"\n Returns a list of all european countries as names, excluding some small\n countries.\n \"\"\"\n ignored = [\n 'Luxembourg', 'Andorra', 'Liechtenstein', 'Macedonia', 'Malta',\n 'Monaco', 'San Marino', 'Vatican City', 'Northern Cyprus']\n return [country for country in get_european_country_names()\n if country not in ignored]\n\n\ndef get_countries_as_iso_codes():\n \"\"\"\n Returns a list of countries as ISO codes\n \"\"\"\n iso_codes = {}\n for country in get_european_countries():\n iso_code = wbdata.search_countries(country, display=False)\n if len(iso_code) == 1:\n iso_codes[country] = iso_code[0]['id']\n return iso_codes.values()\n\n\ndef get_rankings(language, project_number):\n \"\"\"\n Gets the rankings of a specific language per country. If it is the most\n preferred language, it gets number 0, if it is the second then 1, etc ...\n \"\"\"\n rankings = {}\n prefs_lists = {}\n for country in get_european_countries():\n prefs_lists[country] = bigquery.get_languages_by_popularity(\n country, project_number)\n for country, languages_sorted_by_preference in prefs_lists.iteritems():\n try:\n rankings[country] = languages_sorted_by_preference.index(language)\n except ValueError: # the language does not appear in the list\n rankings[country] = None\n return rankings\n\n\ndef get_economic_dataframes():\n \"\"\"\n Returns dataframes for GDP at PPP, unemployment, and total government\n debt, per country\n \"\"\"\n countries = get_countries_as_iso_codes()\n ppps = wbdata.get_dataframe(\n {\"NY.GDP.PCAP.PP.KD\": \"gdpppp\"}, country=countries,\n data_date=DATA_DATE)\n unemployement = wbdata.get_dataframe(\n {\"SL.UEM.TOTL.ZS\": \"percent\"}, country=countries, data_date=DATA_DATE2)\n debt = wbdata.get_dataframe(\n {\"GC.DOD.TOTL.GD.ZS\": \"debt\"}, country=countries, data_date=DATA_DATE2)\n return ppps, unemployement, debt\n\n\nLANGUAGES = ['Haskell', 'Ruby', 'Clojure', 'Java', 'C', 
'C++', 'Python',\n 'JavaScript', 'Scheme', 'OCaml']\n\n\ndef add_language_rankings(dataframes, project_number):\n \"\"\" Adds a new column about language preferences to economic dataframes \"\"\"\n ppps, unemployement, debt = dataframes\n for lang in LANGUAGES:\n series = pd.Series(get_rankings(lang, project_number))\n ppps[lang] = series\n unemployement[lang] = series\n debt[lang] = series\n return ppps, unemployement, debt\n\n\ndef correlate(project_number):\n \"\"\" Correlates economic to language preference columns in dataframes \"\"\"\n dataframes = get_economic_dataframes()\n ppps, unemployement, debt = add_language_rankings(\n dataframes, project_number)\n unemployement = unemployement.dropna()\n gdp_correlations = []\n unemployment_corrs = []\n debt_corrs = []\n for lang in LANGUAGES:\n # No need to `dropna`, since `Series.corr` drops missing values\n gdp_correlations.append(tuple([lang, ppps.gdpppp.corr(ppps[lang])]))\n unemployment_corrs.append(tuple([\n lang, unemployement.percent.corr(unemployement[lang])]))\n debt_corrs.append(tuple([\n lang, debt.debt.corr(debt[lang])]))\n gdp_correlations.sort(key=lambda x: x[1])\n unemployment_corrs.sort(key=lambda x: x[1], reverse=True)\n debt_corrs.sort(key=lambda x: x[1], reverse=True)\n return gdp_correlations, unemployment_corrs, debt_corrs\n\n\ndef produce_figure(correlations, colorname, measure_name, filename):\n \"\"\"\n Produce a bar plot figure out of the passed in correlations\n \"\"\"\n def autolabel(rects, axes):\n \"\"\"\n Label bars with a text right above or below the bar\n \"\"\"\n for rect in rects:\n height = rect.get_y()\n vertical = 'top'\n if height == 0:\n height = rect.get_height()\n vertical = 'bottom'\n axes.text(\n rect.get_x()+rect.get_width()/2.,\n 1.05*height,\n '%.2f' % height,\n ha='center', va=vertical)\n\n # pylint: disable=no-member\n x_values = np.arange(len(correlations))\n # pylint: enable=no-member\n width = 0.6\n\n _, axes = plt.subplots()\n y_values = [x[1] for x in 
correlations]\n rects = axes.bar(x_values, y_values, width, color=colorname, alpha=0.4)\n axes.set_ylabel(r'$\\rho(%s,lang)$' % measure_name)\n axes.set_xticks(x_values+width/2.0)\n plt.ylim([1.15*min(y_values), 1.4*max(y_values)])\n langs = []\n for lang, _ in correlations:\n if lang != 'JavaScript':\n langs.append(lang)\n else:\n langs.append('JS')\n axes.set_xticklabels(tuple(langs), rotation=45)\n autolabel(rects, axes)\n plt.savefig(filename)\n\n\ndef produce_all_figures(project_number):\n GDP_CORRS, UNEMPLOYMENT_CORRS, DEBT_CORRS = correlate(project_number)\n produce_figure(GDP_CORRS, 'y', 'GDP', 'gdp_corr.png')\n produce_figure(UNEMPLOYMENT_CORRS, 'r', 'U', 'unemp_corr.png')\n produce_figure(DEBT_CORRS, 'b', 'D', 'debt_corr.png')\n" } ]
11
rSkogeby/web-server
https://github.com/rSkogeby/web-server
f1abf89e1b2245a13c582cc87e9b40e9c668197e
362f25be49e359b9fcf8cf4d1038d59319f96020
0ff985c7323f40ea4fbc434200d4458016b7f71f
refs/heads/master
2020-04-22T12:14:08.440638
2019-02-18T08:16:46
2019-02-18T08:16:46
170,365,787
2
3
null
null
null
null
null
[ { "alpha_fraction": 0.4808034300804138, "alphanum_fraction": 0.48833560943603516, "avg_line_length": 42.64840316772461, "blob_id": "182c6b483890c669a1eef9a66fc50a6f8ac2b0a4", "content_id": "cf1ea56cd3e2364c2e143d3d3d43b8600cdb3379", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9559, "license_type": "permissive", "max_line_length": 80, "num_lines": 219, "path": "/setup.py", "repo_name": "rSkogeby/web-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Host a web server.\n\nWeb server implementing SQLAlchemy to demonstrate basic CRUD operations.\n\"\"\"\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport cgi\nimport cgitb\ncgitb.enable()\n\nfrom db_setup import Base, Restaurant, MenuItem\n\n\nclass webserverHandler(BaseHTTPRequestHandler):\n \"\"\"Fetch definition of http method.\"\"\"\n\n def do_GET(self):\n \"\"\"Run http GET request.\"\"\"\n try:\n if self.path.endswith('/restaurant'):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n restaurants = session.query(Restaurant).all()\n output = '<html><body>'\n output += '<h4>'\n output += '<a href=\"/restaurant/new\">Make a New Restaurant</a>'\n output += '</h4>'\n for restaurant in restaurants:\n output += '''{} <a href=\"/restaurant/{}/edit\">Edit</a> |\n <a href=\"/restaurant/{}/delete\">Delete</a>'''\\\n .format(restaurant.name, restaurant.id,\n restaurant.id)\n output += '<br />'\n output += '</html></body>'\n session.close()\n\n self.wfile.write(output.encode())\n return\n elif self.path.endswith('/restaurant/new'):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n output = 
'<html><body>'\n output += '<h4>'\n output += '<a href=\"/restaurant\">Back to restaurant list</a>'\n output += '</h4>'\n output += '''<form method = \"POST\" enctype =\n \"multipart/form-data\" action = \"/restaurant/new\"><h2>Enter new\n restaurant name: </h2><input name=\"newRestaurantName\" type=\n \"text\"><input type=\"submit\" value=\"Add\"></form>'''\n output += '</html></body>'\n self.wfile.write(output.encode())\n return\n elif self.path.endswith('/edit'):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n restaurant = session.query(Restaurant).\\\n filter_by(id=self.path.split('/')[2]).one()\n output = '<html><body>'\n output += '<h4>'\n output += '<a href=\"/restaurant\">Back to restaurant list</a>'\n output += '</h4>'\n output += '<h3>'\n output += '{}'.format(restaurant.name)\n output += '</h3>'\n output += '''<form method=\"POST\" enctype=\"multipart/form-data\"\n action=\"edit\"><h4>Edit restaurant name:</h4><input name=\n \"restaurantName\" type = \"text\"><input type = \"submit\"\n value = \"Change\"></form>'''\n output += '</html></body>'\n self.wfile.write(output.encode())\n session.close()\n return\n elif self.path.endswith('/delete'):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n restaurant = session.query(Restaurant).\\\n filter_by(id=self.path.split('/')[2]).one()\n output = '<html><body>'\n output += '<h4>'\n output += '<a href=\"/restaurant\">Back to restaurant list</a>'\n output += '</h4>'\n output += '<h3>'\n output += '{}'.format(restaurant.name)\n output += '</h3>'\n output += '''<form method=\"POST\" enctype=\"multipart/form-data\"\n 
action=\"delete\"><h4>By pressing confirm this restaurant will be\n permanently removed from the database. </h4><button type=\n \"submit\" name=\"confirm\" value=\"True\">Confirm</button>'''\n output += '</html></body>'\n self.wfile.write(output.encode())\n session.close()\n return\n else:\n self.send_response(301)\n self.send_header('Location', '/restaurant')\n self.end_headers()\n return\n except IOError as e:\n self.send_error(404, 'File Not Found %s', self.path)\n return\n\n def do_POST(self):\n \"\"\"Change methods for creating, updating and deleting db entries.\"\"\"\n try:\n if self.path.endswith('/restaurant/new'):\n c_type, p_dict = cgi.parse_header(\n self.headers.get('Content-Type')\n )\n content_len = int(self.headers.get('Content-length'))\n p_dict['boundary'] = bytes(p_dict['boundary'], \"utf-8\")\n p_dict['CONTENT-LENGTH'] = content_len\n message_content = ''\n if c_type == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, p_dict)\n message_content = fields.get('newRestaurantName')\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n if isinstance(message_content[0], type(b'')):\n new_restaurant = Restaurant(\n name=message_content[0].decode()\n )\n else:\n new_restaurant = Restaurant(\n name=message_content[0]\n )\n session.add(new_restaurant)\n session.commit()\n session.close()\n self.send_response(301)\n self.send_header('Content-type', 'text/html')\n self.send_header('Location', '/restaurant')\n self.end_headers()\n return\n elif self.path.endswith('/edit'):\n c_type, p_dict = cgi.parse_header(\n self.headers.get('Content-Type')\n )\n content_len = int(self.headers.get('Content-length'))\n p_dict['boundary'] = bytes(p_dict['boundary'], \"utf-8\")\n p_dict['CONTENT-LENGTH'] = content_len\n message_content = ''\n if c_type == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, p_dict)\n new_name = 
fields.get('restaurantName')\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n restaurant = session.query(Restaurant).\\\n filter_by(id=self.path.split('/')[2]).one()\n if isinstance(new_name[0], type(b'')):\n restaurant.name = new_name[0].decode()\n else:\n restaurant.name = new_name[0]\n session.add(restaurant)\n session.commit()\n session.close()\n self.send_response(301)\n self.send_header('Content-type', 'text/html')\n self.send_header('Location', '/restaurant')\n self.end_headers()\n elif self.path.endswith('/delete'):\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n restaurant = session.query(Restaurant).\\\n filter_by(id=self.path.split('/')[2]).one()\n session.delete(restaurant)\n session.commit()\n session.close()\n self.send_response(301)\n self.send_header('Content-type', 'text/html')\n self.send_header('Location', '/restaurant')\n self.end_headers()\n else:\n self.send_response(301)\n self.send_header('Location', '/')\n self.end_headers()\n return\n except IOError as e:\n self.send_error(404, 'File Not Found %s', self.path)\n\n\ndef main():\n \"\"\"Serve up an http server on port 8080.\"\"\"\n try:\n port = 8080\n server = HTTPServer(('', port), webserverHandler)\n print('Server running on port %s' % port)\n server.serve_forever()\n except KeyboardInterrupt as e:\n print('^C entered, stopping web server...')\n server.socket.close()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7039006948471069, "alphanum_fraction": 0.716312050819397, "avg_line_length": 13.868420600891113, "blob_id": "09fa3700add749d62657682caeb99ccbf084be47", "content_id": "7ddf96af87964ca6a087375c9fa4ed7448bc6493", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 564, "license_type": 
"permissive", "max_line_length": 92, "num_lines": 38, "path": "/README.md", "repo_name": "rSkogeby/web-server", "src_encoding": "UTF-8", "text": "# Web Server\n\nWeb server implementing SQLAlchemy to demonstrate basic CRUD operations.\n\n## Requirements\n\n- [SQLAlchemy](https://www.sqlalchemy.org/)\n- [cgi](https://docs.python.org/3/library/cgi.html)\n- restaurantmenu.db\n- setup.py\n- db_setup.py\n\n## Usage\n\nInstall SQLAlchemy:\n\n```bash\npip3 install sqlalchemy\n```\n\nIn repository run\n\n```bash\npython3 setup.py\n```\n\nor \n\n```bash\nchmod +x setup.py\n./setup.py\n```\n\nOpen a browser and go to _localhost:8080_. Create, read, update and delete database entries.\n\n## License\n\n[MIT](https://choosealicense.com/licenses/mit/)" } ]
2
ccmkorea/real-python-test
https://github.com/ccmkorea/real-python-test
794d318498245e7bf630bd29038c44f45fb70372
ac21684b5d67f8c84978c46acdb864ba62697735
050b423456dc97eb3d65507913c9af07b571e2f9
refs/heads/master
2021-07-24T03:54:44.756501
2017-11-05T18:54:08
2017-11-05T18:54:08
109,606,170
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.8222222328186035, "avg_line_length": 45, "blob_id": "afa6cbcb95c9efcf06f83c88567cdd1743bb7c04", "content_id": "12cdbaba91e4cea432f0967b656e1637b00f1b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/test/lib/python3.6/enum.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/enum.py" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 51, "blob_id": "bc5818e244543c243479541a4c3fa2cece98c0a4", "content_id": "dce816db3d97fdb83991099683545dd821748e6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/test/lib/python3.6/__future__.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/__future__.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "52f08f0b4670e840a3cc5944d63cf79a67379b3d", "content_id": "f191e2f78fe39c897da87a5b0684a1ac67d4e2bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/reprlib.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/reprlib.py" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 50, "blob_id": "8ab3699475eeeff4ac2becd9bfa4662102ec22b0", "content_id": "5611cb56ab2e6210d90a695c59c256defcbec6b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/test/lib/python3.6/linecache.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/linecache.py" }, { "alpha_fraction": 0.760869562625885, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 46, "blob_id": "a33a0d51e88f5d7df0d1ec9ba4b61b4c11b2861e", "content_id": "3abf9747c09d5303476947f632c5e84ebf3c3a19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/test/lib/python3.6/types.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/types.py" }, { "alpha_fraction": 0.7884615659713745, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 52, "blob_id": "2f8fae009ddfc0932443ffc2047d2d9c380a9e46", "content_id": "87df7f31d435676d2c2aa59a7eae9a2dda681d81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/test/lib/python3.6/rlcompleter.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/rlcompleter.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "7b54c975f46a51c3df52f1bc8f9f2852842a9a51", "content_id": "a4ecfcccf170edb706fe25940b0e46c6da1f89f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/fnmatch.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/fnmatch.py" }, { "alpha_fraction": 
0.7799999713897705, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 50, "blob_id": "3c5a22209fd6e7972ed7e080e15b622e449cdd44", "content_id": "9e23e864a4052c6ddea7213d7e35b0ba4ae9ed10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/test/lib/python3.6/functools.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/functools.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "db7e928da0a292f2951f56013ea37cd1c6a8acc3", "content_id": "11270888ba8e9ea76ff1bfb4af4ad7e8e19aff16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/struct.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/struct.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "2038ec1471f40d9102e9876e38b876f629650133", "content_id": "fe903c4d91a27ed5fbad132d7c072f571a758016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/codecs.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/codecs.py" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 49, "blob_id": "65fc19036e9ba3bdc2bab2402150f1ef7b9a6214", "content_id": "efa26cec67fca028effc0683f253ad023e0c211e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/test/lib/python3.6/tempfile.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/tempfile.py" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 49, "blob_id": "fac4f5a4d076adcad26a5f614cc49bcaa8f39fe9", "content_id": "cf1ebb00dd3bcb287750cbd7b0843642dce2dec4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/test/lib/python3.6/tokenize.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/tokenize.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "62c5f45c1b281141c85783b7aaadafab54d7fa42", "content_id": "b81a3742c6e26737de15c44979b1deada3542715", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/ntpath.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/ntpath.py" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.8139534592628479, "avg_line_length": 43, "blob_id": "afea03c4023534e73f470ba2283a7f945afef733", "content_id": "903051744af4fbf3597d60ca6f92d99a89523d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 43, "num_lines": 1, "path": "/test/lib/python3.6/re.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/re.py" }, { "alpha_fraction": 0.7592592835426331, 
"alphanum_fraction": 0.8148148059844971, "avg_line_length": 54, "blob_id": "b4faee508fa8a105a64e997d47cbb2088368256a", "content_id": "2e957b10aa052fa58f05b798bc35196f48f18d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 54, "num_lines": 1, "path": "/test/lib/python3.6/_dummy_thread.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/_dummy_thread.py" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 54, "blob_id": "853d82fd3e4ee9dbfc3d83c912b42718c295a62c", "content_id": "6ed13f3eb1919b1c8ce401400596638d7ecc0f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 54, "num_lines": 1, "path": "/test/lib/python3.6/sre_constants.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/sre_constants.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "185e3333178c6badcee1cba88b6af62956af1d51", "content_id": "9400f4002d098f3173e1ae58ec1cf89861411ef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/keyword.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/keyword.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "bad894fa68a35c4845c850b4ba05ade01898655e", "content_id": "87a41b8f1d000bd0ead593e4c4f5ff965b13ecfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/tarfile.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/tarfile.py" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 50, "blob_id": "f14287640c265c77e9c28cafa86bc5701131fcf7", "content_id": "66d8e6692d0f90e98e7fb30491176d1471d4ac66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/test/lib/python3.6/posixpath.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/posixpath.py" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "09f150b46aae6b2c050b444e6ffa954eec8566b6", "content_id": "797853e0278908e1905a67724673b94935755745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/base64.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/base64.py" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 49, "blob_id": "93353511d3efaaff3cbb9f93277ed3e47193d19c", "content_id": "214491d94d70a4466603ae964904920f386640b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/test/lib/python3.6/operator.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/operator.py" }, { "alpha_fraction": 
0.7599999904632568, "alphanum_fraction": 0.8199999928474426, "avg_line_length": 50, "blob_id": "68765447bea3d1e2729aebbb7b9ce6a626bb9bab", "content_id": "41a80f336b3ab5fba81d26f1cec05c630772bc91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/test/lib/python3.6/sre_parse.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/sre_parse.py" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 44, "blob_id": "e97558e036c0e3c331cce1bd78f0085f4d708d3c", "content_id": "5a50d842b9227acdcd6570a816d67910f3d9ffee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/test/lib/python3.6/abc.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/abc.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "51c876ea7f3c6417c75212b0375809f2bf371faf", "content_id": "16e06d34077dbf262320380debb640c5385d150d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/random.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/random.py" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.8222222328186035, "avg_line_length": 45, "blob_id": "597860e49e39f468e1277e39a7ade42e858b2917", "content_id": "854120effe5c0d8aec66ef23ffc61ffe9601e2fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, 
"license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/test/lib/python3.6/hmac.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/hmac.py" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.8139534592628479, "avg_line_length": 43, "blob_id": "cb02b6dcb6c1c218b59f4333514c9ede93af9c19", "content_id": "2600d582abf89ed327363aaa8f5ba048148b74bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 43, "num_lines": 1, "path": "/test/lib/python3.6/os.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/os.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "ac4eb8e55f2603190f0703cb06c305dbec7cb448", "content_id": "0fd45e500527927e9e505d60c33212c13ddaf08b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/shutil.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/shutil.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "99e44349a2d557e2970ad3b5f039bdd610ada380", "content_id": "0171ba9809167452a3bbd94516c2242652690062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/copyreg.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/copyreg.py" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 
0.8245614171028137, "avg_line_length": 57, "blob_id": "9be992c2cbbbd5733fb6e3b3ab3d1dbd1084fa08", "content_id": "8fc8ad6bce55423bdc51a01c9a4bb974a5eb5e2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/test/lib/python3.6/_collections_abc.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/_collections_abc.py" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 44, "blob_id": "3b74e3a684906cb1c4ebedc262e5816d95a3c133", "content_id": "77032f3271b01a2c40b2f666c11f012275dc8df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/test/lib/python3.6/imp.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/imp.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "2cc01e6cac5ddacb55a0e34228f15a82cde9b4af", "content_id": "228886a436fc8c24c365a7c606eb8bf7fcc05f7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/bisect.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/bisect.py" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "89dba0a994d4192801fdee46f0403c224de7e766", "content_id": "c23da2f6c83617120300c45de552d29fb5b95389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", 
"max_line_length": 47, "num_lines": 1, "path": "/test/lib/python3.6/locale.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/locale.py" }, { "alpha_fraction": 0.760869562625885, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 46, "blob_id": "95f5d29b58c271d7e675d2bb16a1ed707ca24950", "content_id": "eadb6172674217e4de575fbcb304239bda9e73ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/test/lib/python3.6/heapq.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/heapq.py" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.8269230723381042, "avg_line_length": 52, "blob_id": "b098f2e90f8c6cb2f9a609f6908ce0ed6e978600", "content_id": "328c4e7d318c0639b9dbf70e6adb773ed59eeeda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/test/lib/python3.6/sre_compile.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/sre_compile.py" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 49, "blob_id": "bcb1ae35bb29b45214e915fa35a49bfa02eadaa4", "content_id": "9193e1af9a8551c5b18f9fcecebb0fad81bbee1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/test/lib/python3.6/warnings.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/warnings.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, 
"avg_line_length": 48, "blob_id": "729b95d77b00ab6267be65afff6a1aaeb8945d7a", "content_id": "192bd463913d78d2e1d560f54e8288af2e0bc3e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/hashlib.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/hashlib.py" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.8269230723381042, "avg_line_length": 52, "blob_id": "74b8647c0cd22a348d3fc0fcd88b06b7a457074e", "content_id": "b9bd4bb9c867baa6b672e4a431a4252f73edb285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/test/lib/python3.6/_weakrefset.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/_weakrefset.py" }, { "alpha_fraction": 0.7708333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "4efa46d8ba2de10b534701c2f45255ba66c89db2", "content_id": "e5edaea7254bda9b7ab3c6ed7840cdee5505f479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/test/lib/python3.6/weakref.py", "repo_name": "ccmkorea/real-python-test", "src_encoding": "UTF-8", "text": "/Users/vcbank/anaconda3/lib/python3.6/weakref.py" } ]
38
chitgub/OCR2audio
https://github.com/chitgub/OCR2audio
ceb72be78731319eb2843d564dff06a771548a7a
bce16e04e6ffe8d190ac15eacd05e2007e10dd30
aa847bf8c017de42febca8062ad843226ae7cf2a
refs/heads/master
2020-12-09T17:00:40.104793
2020-01-12T09:13:39
2020-01-12T09:13:39
233,365,550
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7384615540504456, "alphanum_fraction": 0.7461538314819336, "avg_line_length": 20.41666603088379, "blob_id": "421ea5cb17d65f33132b31f02c500fa2c649b3aa", "content_id": "8f902f74cb56a6c3b23f0c70e94eb7f981dbfbb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/OCR2audio.py", "repo_name": "chitgub/OCR2audio", "src_encoding": "UTF-8", "text": "try:\n from PIL import Image\nexcept ImportError:\n import Image\nimport pytesseract\nimport pyttsx3\n\nocr_to_text = pytesseract.image_to_string(Image.open('image.png'))\nprint(ocr_to_text)\nengine = pyttsx3.init()\nengine.say(ocr_to_text)\nengine.runAndWait()\n\n\n\n" } ]
1
psicktrick/feature_selection_project
https://github.com/psicktrick/feature_selection_project
befb85c0eb2259e168267a4b868c6e957f681a81
3666ada3a8b8258336f4cc6b008fb0c18152c6b5
231c1eaa6110d2b8c2cdad038dd3b84e207fe50b
refs/heads/master
2020-04-14T15:12:08.863533
2019-01-04T04:31:27
2019-01-04T04:31:27
163,918,499
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6844525933265686, "alphanum_fraction": 0.6936522722244263, "avg_line_length": 28.324323654174805, "blob_id": "7ae34152803a5784152bf6a182b34b0296005e22", "content_id": "e37e0923e72b3f0a85c476f5c72b7d280a779496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 85, "num_lines": 37, "path": "/q02_best_k_features/build.py", "repo_name": "psicktrick/feature_selection_project", "src_encoding": "UTF-8", "text": "# %load q02_best_k_features/build.py\n# Default imports\n\nimport pandas as pd\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.feature_selection import f_regression\n\n\n# Write your solution here:\ndef percentile_k_features(df,k=20):\n X=df.iloc[:,:-1]\n y=df.iloc[:,-1]\n sp = SelectPercentile(f_regression,percentile=k)\n sp.fit_transform(X,y)\n features = X.columns.values[sp.get_support()]\n scores = sp.scores_[sp.get_support()]\n fs_score = list(zip(features,scores))\n df = pd.DataFrame(fs_score,columns=['Name','Score'])\n return df.sort_values(['Score','Name'],ascending = [False,True])['Name'].tolist()\n\n\n\nX=data.iloc[:,:-1]\ny=data.iloc[:,-1]\nsp = SelectPercentile(f_regression,percentile=20)\nsp.fit_transform(X,y)\n\nfeatures = X.columns.values[sp.get_support()]\nscores = sp.scores_[sp.get_support()]\nscores\nfs_score = list(zip(features,scores))\ndf = pd.DataFrame(fs_score,columns=['Name','Score'])\ndf.head()\ndf.sort_values(['Score','Name'],ascending = [False,True])#['Name'].tolist()\n\n\n" }, { "alpha_fraction": 0.6910890936851501, "alphanum_fraction": 0.7009900808334351, "avg_line_length": 22.952381134033203, "blob_id": "2c3dcc928ef0f196b6b12d29c85ea2fc1f9f1d36", "content_id": "9232ede1b8890fe04f393ed26d8383cb9322e093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, 
"license_type": "no_license", "max_line_length": 56, "num_lines": 21, "path": "/q03_rf_rfe/build.py", "repo_name": "psicktrick/feature_selection_project", "src_encoding": "UTF-8", "text": "# %load q03_rf_rfe/build.py\n# Default imports\nimport pandas as pd\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import RFE\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Your solution code here\n\ndef rf_rfe(df):\n X,y = df.iloc[:,:-1],df.iloc[:,-1]\n model = RandomForestClassifier()\n features_no = X.columns\n rfe = RFE(model,len(X.columns)/2)\n rfe.fit(X,y)\n return X.columns.values[rfe.get_support()].tolist()\nrf_rfe(data)\ndata.shape\n\n\n" }, { "alpha_fraction": 0.7295238375663757, "alphanum_fraction": 0.739047646522522, "avg_line_length": 29.764705657958984, "blob_id": "36ca62771f367b0a4d88d46c6f0adf3d03f45442", "content_id": "fcca888103c16112ee5f0b8dbf7ee70cd8e3562c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 57, "num_lines": 17, "path": "/q04_select_from_model/build.py", "repo_name": "psicktrick/feature_selection_project", "src_encoding": "UTF-8", "text": "# %load q04_select_from_model/build.py\n# Default imports\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\n\n# Your solution code here\ndef select_from_model(df):\n X,y = df.iloc[:,:-1],df.iloc[:,-1]\n clf = RandomForestClassifier(random_state=9)\n model = SelectFromModel(clf)\n model.fit_transform(X,y)\n return X.columns.values[model.get_support()].tolist()\n\n\n" }, { "alpha_fraction": 0.7195122241973877, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 24.789474487304688, "blob_id": "4c341d3234bdced97e9581f9668d05cdc5fb0626", "content_id": 
"c81da1d610cf4324091892a25b23f7af0f7cba86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/q01_plot_corr/build.py", "repo_name": "psicktrick/feature_selection_project", "src_encoding": "UTF-8", "text": "# %load q01_plot_corr/build.py\n# Default imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import yticks, xticks, subplots, set_cmap\nplt.switch_backend('agg')\nimport seaborn as sns\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\n\n# Write your solution here:\ndef plot_corr(data, size=11):\n sns.heatmap(data.corr(), cmap='YlOrRd')\n plt.show()\nplot_corr(data, size=11)\nsns.heatmap(data.corr(), cmap='YlOrRd')\nplt.show()\ndata.head()\ndata.dtypes\n\n\n" } ]
4
zhangxu90s/Work2019_DFF_SSM
https://github.com/zhangxu90s/Work2019_DFF_SSM
40b52bd11e203d88ae7d97485d0471f46144893b
4a42c3f921e042374b8dda3fd1acfe0e1a23eec7
19c67692a591d7eeb263bead1226d96a281d51b9
refs/heads/master
2022-11-13T05:36:47.782886
2020-07-06T02:27:46
2020-07-06T02:27:46
166,228,101
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6141752004623413, "alphanum_fraction": 0.6413084864616394, "avg_line_length": 32.24894332885742, "blob_id": "b63803caa656337c030b4965035a55d982ab7751", "content_id": "9049f0257c70c2a2a9cecdb6355de945a7938b80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7923, "license_type": "no_license", "max_line_length": 179, "num_lines": 237, "path": "/LCQMC/siamese_NN.py", "repo_name": "zhangxu90s/Work2019_DFF_SSM", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\n\n# ๆŒ‡ๅฎš็ฌฌไธ€ๅ—GPUๅฏ็”จ \nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nconfig = tf.ConfigProto() \nconfig.gpu_options.allow_growth=True #ไธๅ…จ้ƒจๅ ๆปกๆ˜พๅญ˜, ๆŒ‰้œ€ๅˆ†้…\nsess = tf.Session(config=config)\n\nKTF.set_session(sess)\nimport numpy as np\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint, TensorBoard,EarlyStopping, ReduceLROnPlateau\nfrom keras.layers import Embedding, Input, TimeDistributed, Lambda,LSTM,GlobalMaxPooling1D,Dense,Activation,subtract,Add,multiply,concatenate,merge,Dropout,BatchNormalization\nfrom keras.models import Model,Sequential\nfrom sklearn.model_selection import train_test_split\nfrom keras.optimizers import Adam,Adadelta\nfrom keras.preprocessing.sequence import pad_sequences\nfrom multi_perspective import MultiPerspective,PredictLayer\nimport data_helper\n\n\ninput_dim = data_helper.MAX_SEQUENCE_LENGTH\nemb_dim = data_helper.EMB_DIM\nmodel_path = './model/siameselstm.hdf5'\ntensorboard_path = './model/ensembling'\n\nembedding_matrix = data_helper.load_pickle('embedding_matrix.pkl')\n\nembedding_layer = Embedding(embedding_matrix.shape[0],\n emb_dim,\n weights=[embedding_matrix],\n input_length=input_dim,\n trainable=False)\n\n\ndef base_network(input_shape):\n input = Input(shape=input_shape)\n\n x = embedding_layer(input)\n x = TimeDistributed(Dense(300, 
activation='relu'))(x)\n x = Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,))(x)\n \n y = embedding_layer(input)\n y = TimeDistributed(Dense(300, activation='relu'))(y)\n y = Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,))(y)\n\n multi_memory_DT = Add()([x,y])\n\n p = embedding_layer(input)\n p = LSTM(300, return_sequences=False, dropout=0.1, recurrent_dropout=0.1,name='f_input')(p)\n\n q = embedding_layer(input)\n q = LSTM(300, return_sequences=False, dropout=0.1, recurrent_dropout=0.1,name='re_input')(q)\n\n multi_memory_lstm = Add()([p,q])\n\n multi_memory = concatenate([multi_memory_lstm,multi_memory_DT])\n\n\n return Model(input, multi_memory, name='DFF')\n\ndef f1_score(y_true, y_pred):\n\n # Count positive samples.\n c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))\n c3 = K.sum(K.round(K.clip(y_true, 0, 1)))\n\n # If there are no true samples, fix the F1 score at 0.\n if c3 == 0:\n return 0\n\n # How many selected items are relevant?\n precision = c1 / c2\n\n # How many relevant items are selected?\n recall = c1 / c3\n\n # Calculate f1_score\n f1_score = 2 * (precision * recall) / (precision + recall)\n return f1_score\n\n\ndef precision(y_true, y_pred):\n\n # Count positive samples.\n c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))\n c3 = K.sum(K.round(K.clip(y_true, 0, 1)))\n\n # If there are no true samples, fix the F1 score at 0.\n if c3 == 0:\n return 0\n\n # How many selected items are relevant?\n precision = c1 / c2\n\n return precision\n\n\ndef recall(y_true, y_pred):\n\n # Count positive samples.\n c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n c3 = K.sum(K.round(K.clip(y_true, 0, 1)))\n\n # If there are no true samples, fix the F1 score at 0.\n if c3 == 0:\n return 0\n\n recall = c1 / c3\n\n return recall\n\nmargin = 0.7\ntheta = lambda t: (K.sign(t)+1.)/2.\nnb_classes = 2\ndef new_mse_loss(y_true, y_pred):\n loss1 = mse_loss(y_true, 
y_pred)\n #one_hot\n loss2 = mse_loss(K.ones_like(y_pred)/nb_classes, y_pred)\n return 0.9*loss1+0.1*loss2\n\ndef mse_loss(y_true, y_pred):\n return K.mean(K.square(y_true - y_pred))\n\ndef loss(y_true, y_pred):\n return - (1 - theta(y_true - margin) * theta(y_pred - margin) \n - theta(1 - margin - y_true) * theta(1 - margin - y_pred)\n ) * (y_true * K.log(y_pred + 1e-8) + (1 - y_true) * K.log(1 - y_pred + 1e-8))\ndef myloss(y_true, y_pred, e=0.35):\n loss1 = mse_loss(y_true, y_pred)\n #one_hot\n loss2 = mse_loss(K.ones_like(y_pred)/nb_classes, y_pred)\n loss3 = loss(y_true, y_pred)\n return (1-2*e)*loss1 + e*loss2 + e*loss3\n\ndef siamese_model():\n input_shape = (input_dim,)\n \n # Creating Encoder\n\n base_net = base_network(input_shape)\n\n \n # Creating Encoder layer for frist Sentence\n input_q1 = Input(shape=input_shape, dtype='int32', name='sequence1')\n processed_q1 = base_net([input_q1])\n \n \n # Creating Encoder layer for Second Sentence\n input_q2 = Input(shape=input_shape, dtype='int32', name='sequence2')\n processed_q2 = base_net([input_q2])\n \n #doing matching\n abs_diff = Lambda(lambda x: K.abs(x[0] - x[1]))([processed_q1,processed_q2])\n cos_diff = Lambda(lambda x: K.cos(x[0] - x[1]))([processed_q1,processed_q2])\n multi_diff = multiply([processed_q1,processed_q2])\n all_diff = concatenate([abs_diff,cos_diff,multi_diff])\n\n #DNN\n all_diff = Dropout(0.5)(all_diff)\n similarity = Dense(600)(all_diff)\n similarity = BatchNormalization()(similarity)\n similarity = Activation('relu')(similarity)\n similarity = Dense(600)(similarity)\n similarity = Dropout(0.5)(similarity)\n similarity = Activation('relu')(similarity)\n similarity = Dense(1)(similarity)\n similarity = BatchNormalization()(similarity)\n similarity = Activation('sigmoid')(similarity)\n model = Model([input_q1, input_q2], [similarity])\n #loss:binary_crossentropy;optimizer:adm,Adadelta\n adm = Adam(lr=0.002)\n model.compile(loss=myloss, optimizer=adm, metrics=['accuracy', precision, 
recall, f1_score])\n return model\n\n\ndef train():\n \n data = data_helper.load_pickle('model_data.pkl')\n\n train_q1 = data['train_q1']\n train_q2 = data['train_q2']\n train_y = data['train_label']\n\n dev_q1 = data['dev_q1']\n dev_q2 = data['dev_q2']\n dev_y = data['dev_label']\n \n test_q1 = data['test_q1']\n test_q2 = data['test_q2']\n test_y = data['test_label']\n \n model = siamese_model()\n checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', period=1)\n tensorboard = TensorBoard(log_dir=tensorboard_path) \n earlystopping = EarlyStopping(monitor='val_acc', patience=10, verbose=0, mode='max')\n reduce_lr = ReduceLROnPlateau(monitor='val_acc', patience=5, mode='max')\n callbackslist = [checkpoint, tensorboard,earlystopping,reduce_lr]\n\n model.fit([train_q1, train_q2], train_y,\n batch_size=512,\n epochs=200,\n validation_data=([dev_q1, dev_q2], dev_y),\n callbacks=callbackslist)\n '''\n ## Add graphs here\n import matplotlib.pyplot as plt\n\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss']) \n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.plot(history.history['precision'])\n plt.plot(history.history['val_precision'])\n plt.plot(history.history['recall'])\n plt.plot(history.history['val_recall'])\n plt.plot(history.history['f1_score'])\n plt.plot(history.history['val_f1_score'])\n plt.xlabel('epoch')\n plt.legend(['train loss', 'val loss','train accuracy', 'val accuracy','train precision', 'val precision','train recall', 'val recall','train f1_score', 'val f1_score'], loc=3,\n bbox_to_anchor=(1.05,0),borderaxespad=0)\n pic = plt.gcf()\n pic.savefig ('pic.eps',format = 'eps',dpi=1000)\n plt.show()\n '''\n loss, accuracy, precision, recall, f1_score = model.evaluate([test_q1, test_q2],test_y,verbose=1,batch_size=256)\n print(\"Test best model =loss: %.4f, accuracy:%.4f, precision:%.4f,recall: %.4f, f1_score:%.4f\" % (loss, 
accuracy, precision, recall, f1_score))\n\nif __name__ == '__main__':\n train()\n \n \n" }, { "alpha_fraction": 0.6835051774978638, "alphanum_fraction": 0.7298969030380249, "avg_line_length": 31.33333396911621, "blob_id": "f9e99657ba536ed5059ec2716db624797bfbff6a", "content_id": "281623b76fb03cf38eae8bab1a6c5562785d460a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 970, "license_type": "no_license", "max_line_length": 122, "num_lines": 30, "path": "/README.md", "repo_name": "zhangxu90s/Work2019_DFF_SSM", "src_encoding": "UTF-8", "text": "# Work2019_DFF_SSM\n\nThis repo contains the implementation of \"Deep Feature Fusion Model for Sentence Semantic Matching\" in Keras & Tensorflow.\n# Usage for python code\n## 0. Requirement\npython 3.6 \nnumpy==1.16.4 \npandas==0.22.0 \ntensorboard==1.12.0 \ntensorflow-gpu==1.12.0 \nkeras==2.2.4 \ngensim==3.0.0\n## 1. Data preparation\nThe dataset is Quora & LCQMC.\\\n\"Quora question pairs.\", https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs.\n\n\"LCQMC: A Large-scale Chinese Question Matching Corpus\", https://www.aclweb.org/anthology/C18-1166/.\n## 2. Start the training process\npython siamese_NN.py \n\n# Reference\nIf you find our source useful, please consider citing our work.\n\n@article{zhang2019deep,\\\n title={Deep Feature Fusion Model for Sentence Semantic Matching},\\\n author={Zhang, X and Lu, W and Li, F and Peng, X and Zhang, R},\\\n journal={Computers, Materials \\& Continua},\\\n year={2019},\\\n publisher={Computers, Materials and Continua (Tech Science Press)}\\\n}\n" } ]
2
EvReN-jr/Random-Forest-Classifier-Algorithm-Sklearn
https://github.com/EvReN-jr/Random-Forest-Classifier-Algorithm-Sklearn
4d416fb62972e5cfd378f386def2b6581287d32c
1dc56c5b8242950b489c5a60efe736f47d4fa135
1952b81390d5518cc72bf51562cc634ea6706fe8
refs/heads/master
2022-09-14T17:50:09.924003
2020-05-27T12:44:25
2020-05-27T12:44:25
267,314,306
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7140762209892273, "alphanum_fraction": 0.7478005886077881, "avg_line_length": 23.35714340209961, "blob_id": "45d8a061965b4980d832fafbb17d97872fae507d", "content_id": "7f96fc12cf054133d309787fd41437efc3d4172b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/code.py", "repo_name": "EvReN-jr/Random-Forest-Classifier-Algorithm-Sklearn", "src_encoding": "UTF-8", "text": "import pandas as pd#1\nfrom sklearn.model_selection import train_test_split #2\nfrom sklearn.ensemble import RandomForestClassifier#3\nfrom sklearn.metrics import confusion_matrix#4\n# import libs\n\ndatas=pd.read_csv(\"datas.csv\")# read datas\n#1\n\nx=datas.iloc[:,3:-3].values \ny=datas.iloc[:,-2].values\n# split values\n\nx_train, x_test,y_train,y_test = train_test_split(x,y,test_size=0.10, random_state=0)# 90% for train, %10 for test\n#2\n\nrfc=RandomForestClassifier()\n#3\nrfc.max_depth=100\nrfc.criterion=\"entropy\"#select criterion,other criterion is 'gini'\nrfc.n_estimators=1\nrfc.fit(x_train,y_train)\n\ny_pred=rfc.predict(x_test)\ncm=confusion_matrix(y_test,y_pred)\n#4\nprint(\"RFC\")\nprint(cm)\n" }, { "alpha_fraction": 0.8207547068595886, "alphanum_fraction": 0.8207547068595886, "avg_line_length": 52, "blob_id": "f837af44457c2aea3b1dc853c41e9a9a063e00f9", "content_id": "7fe25a3bce12783bd941e0dfdd8b3df3553329cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 68, "num_lines": 2, "path": "/README.md", "repo_name": "EvReN-jr/Random-Forest-Classifier-Algorithm-Sklearn", "src_encoding": "UTF-8", "text": "# Random Forest Classifier-Algorithm\ndatas_url:https://support.spatialkey.com/spatialkey-sample-csv-data/\n" } ]
2
kingaza/HandPose
https://github.com/kingaza/HandPose
b6601be1add6846cfcb33b54b0a307bb6bca0982
93ef98a24fa250ce4c9b2c3142bc15e884df2be4
7cb8cd6a1f3a5a5e011a273ece308879c2d2332a
refs/heads/master
2020-08-21T09:40:39.086571
2019-12-13T07:24:37
2019-12-13T07:24:37
216,133,027
0
0
MIT
2019-10-19T01:34:08
2019-10-08T04:14:33
2019-05-27T07:37:16
null
[ { "alpha_fraction": 0.5955487489700317, "alphanum_fraction": 0.6001535058021545, "avg_line_length": 22.709091186523438, "blob_id": "5b82f047015846dcda36a9a1423977460fe4f4ba", "content_id": "af8886164803cfe9c4aa43fa9a6db8157c2e2cb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "permissive", "max_line_length": 67, "num_lines": 55, "path": "/Demo_server.py", "repo_name": "kingaza/HandPose", "src_encoding": "UTF-8", "text": "from flask import Flask\n\nimport pygame\n\n\nidx_song = 0\nsongs = ['./Songs/fly.mp3', './Songs/hero.mp3', './Songs/love.mp3']\n\npygame.mixer.init()\npygame.mixer.music.load(songs[idx_song])\n\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n return 'I am a Server.'\n\[email protected]('/ptab/move', methods=['GET', 'POST'])\ndef move():\n return 'Move PTab.' \n\[email protected]('/ptab/stop', methods=['GET', 'POST'])\ndef stop():\n return 'Stop PTab.' \n\[email protected]('/music/play', methods=['GET', 'POST'])\ndef music_play():\n pygame.mixer.music.play()\n return 'Play a song.' \n\[email protected]('/music/stop', methods=['GET', 'POST'])\ndef music_stop():\n pygame.mixer.music.stop()\n return 'Stop playing song.' \n\[email protected]('/music/last', methods=['GET', 'POST'])\ndef music_last():\n global idx_song\n idx_song = (idx_song - 1) % len(songs)\n pygame.mixer.music.load(songs[idx_song])\n pygame.mixer.music.play()\n return 'Play last song.' \n\[email protected]('/music/next', methods=['GET', 'POST'])\ndef music_next():\n global idx_song\n idx_song = (idx_song + 1) % len(songs)\n pygame.mixer.music.load(songs[idx_song])\n pygame.mixer.music.play() \n return 'Play next song.' 
\n\nif __name__ == '__main__':\n app.debug = True # ่ฎพ็ฝฎ่ฐƒ่ฏ•ๆจกๅผ๏ผŒ็”Ÿไบงๆจกๅผ็š„ๆ—ถๅ€™่ฆๅ…ณๆމdebug\n app.run()" }, { "alpha_fraction": 0.4642857015132904, "alphanum_fraction": 0.47278910875320435, "avg_line_length": 29.947368621826172, "blob_id": "402eb2b8ca9d706ba4c30b44a65dade6865e3d1a", "content_id": "7c5a249b8b98f478c3db0a2d3858afa7861e7c20", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "permissive", "max_line_length": 66, "num_lines": 19, "path": "/flipPosesDataset.py", "repo_name": "kingaza/HandPose", "src_encoding": "UTF-8", "text": "import os\nimport cv2 \n \nposes = os.listdir('Poses/')\nfor pose in poses:\n print(\">> Working on pose : \" + pose)\n subdirs = os.listdir('Poses/' + pose + '/') \n for subdir in subdirs:\n files = os.listdir('Poses/' + pose + '/' + subdir + '/')\n print(\">> Working on examples : \" + subdir)\n for file in files:\n if(file.endswith(\".png\")):\n path = 'Poses/' + pose + '/' + subdir + '/' + file\n # Read image\n im = cv2.imread(path)\n\n im = cv2.flip(im, 1)\n\n cv2.imwrite(path, im)\n" }, { "alpha_fraction": 0.5316122174263, "alphanum_fraction": 0.5378819704055786, "avg_line_length": 33.6983528137207, "blob_id": "15a5d3680249003e38730b756edc2b5f48cca2b8", "content_id": "4f912082ff7d602607eac8b93fa75777896c3a24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18980, "license_type": "permissive", "max_line_length": 131, "num_lines": 547, "path": "/demo_hackathon.py", "repo_name": "kingaza/HandPose", "src_encoding": "UTF-8", "text": "from utils import detector_utils as detector_utils\nfrom utils import pose_classification_utils as classifier\nimport cv2\nimport tensorflow as tf\nimport multiprocessing\nfrom multiprocessing import Queue, Pool\nimport time\nfrom utils.detector_utils import WebcamVideoStream\nimport datetime\nimport argparse\nimport os; 
\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nimport keras\nimport gui\n\nimport numpy as np\nimport requests\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level = logging.INFO)\nhandler = logging.FileHandler(\"hackathon.log\")\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')\nhandler.setFormatter(formatter)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\nlogger.addHandler(handler)\nlogger.addHandler(console)\n\nlogger.info('=' * 80)\nlogger.info(\"Start DEMO of Controlling Patient Table\")\nlogger.info('=' * 80)\n\nframe_processed = 0\nscore_thresh = 0.18\n\n# Create a worker thread that loads graph and\n# does detection on images in an input queue and puts it on an output queue\n\nurl_root = 'http://md1z7xac.ad005.onehc.net:5757/api'\n\n\nclass ModeSwitch(object):\n\n def __init__(self):\n self.mode_light = False\n self.mode_ptab = False\n\n self.url_mode_ptab = url_root + '/mode/ptab'\n self.url_mode_light = url_root + '/mode/light'\n\n def set_ptab(self):\n self.mode_ptab = True\n self.mode_light = False\n logger.info(' ==> Set PTab Mode')\n resp = requests.get(self.url_mode_ptab)\n logger.info(f'Send request, receive: {resp.status_code}') \n\n def set_light(self):\n self.mode_light = True\n self.mode_ptab = False\n logger.info(' ==> Set Light Mode')\n resp = requests.get(self.url_mode_light)\n logger.info(f'Send request, receive: {resp.status_code}') \n\n\n\nclass LightController(object):\n def __init__(self):\n self.activated = False\n self.url_light_brighter = url_root + '/light/light-brighter' \n self.url_light_darker = url_root + '/light/light-darker' \n self.url_light_switch = url_root + '/light/light_switch' \n self.url_light_pause = url_root + '/ptab/pause' \n\n self.last_request_time = -1 \n\n self.in_darkering = False\n self.in_brightering = False\n\n def set_activated(self, activated):\n self.activated = activated\n\n def darker(self):\n if 
not self.in_darkering:\n logger.info(' ==> Darker Light')\n self.in_darkering = True\n resp = requests.get(self.url_light_darker)\n logger.info(f'Send request, receive: {resp.status_code}')\n self.last_request_time = time.time() \n self.in_brightering = False \n\n def brighter(self):\n if not self.in_brightering:\n self.in_brightering = True\n logger.info(' ==> Brighter Light')\n resp = requests.get(self.url_light_brighter)\n logger.info(f'Send request, receive: {resp.status_code}')\n self.last_request_time = time.time() \n self.in_darkering = False \n\n def pause(self):\n if self.in_brightering:\n logger.info(' ==> Pause brightering light') \n self.in_brightering = False\n resp = requests.get(self.url_light_pause)\n logger.info(self.url_light_pause)\n logger.info(f'Send request, receive: {resp.status_code}') \n self.last_request_time = time.time() \n\n if self.in_darkering:\n logger.info(' ==> Pause darkering light') \n self.in_darkering = False\n logger.info(self.url_light_pause)\n resp = requests.get(self.url_light_pause)\n logger.info(f'Send request, receive: {resp.status_code}') \n self.last_request_time = time.time() \n\n def switch(self):\n logger.info(' ==> Switch Light')\n resp = requests.get(self.url_light_switch)\n logger.info(f'Send request, receive: {resp.status_code}')\n self.last_request_time = time.time() \n\nclass PTabController(object):\n def __init__(self):\n self.activated = False\n\n self.last_request_time = -1\n self.last_movein_time = -1\n self.last_moveout_time = -1\n\n self.in_tohome = False\n self.in_movein = False\n self.in_moveout = False\n\n self.url_ptab_tohome = url_root + '/ptab/move-home' # thumb\n self.url_ptab_pause = url_root + '/ptab/pause' # palm\n self.url_ptab_movein = url_root + '/ptab/move-in' # left\n self.url_ptab_moveout = url_root + '/ptab/move-out' # right \n\n\n def set_activated(self, activated):\n self.activated = activated\n\n def move_in(self):\n if not self.in_movein:\n logger.info(' ==> Move PTab IN')\n resp = 
requests.get(self.url_ptab_movein)\n logger.info(f'Send request, receive: {resp.status_code}')\n self.last_request_time = time.time() \n self.in_movein = True \n self.in_moveout = False\n self.in_tohome = False \n\n def move_out(self):\n if not self.in_moveout:\n logger.info(' ==> Move PTab OUT')\n resp = requests.get(self.url_ptab_moveout)\n logger.info(f'Send request, receive: {resp.status_code}') \n self.last_request_time = time.time() \n self.in_movein = False \n self.in_moveout = True\n self.in_tohome = False \n\n def to_home(self):\n if not self.in_tohome:\n logger.info(' ==> Move PTab to HOME')\n resp = requests.get(self.url_ptab_tohome)\n logger.info(f'Send request, receive: {resp.status_code}') \n self.last_request_time = time.time() \n self.in_movein = False \n self.in_moveout = False\n self.in_tohome = True \n\n def pause(self):\n if self.in_movein or self.in_moveout or self.in_tohome:\n logger.info(' ==> Pause PTab')\n resp = requests.get(self.url_ptab_pause)\n logger.info(f'Send request, receive: {resp.status_code}') \n self.last_request_time = time.time() \n self.in_movein = False \n self.in_moveout = False\n self.in_tohome = False \n\n\n\ndef worker(input_q, output_q, cropped_output_q, inferences_q, cap_params, frame_processed):\n logger.info(\">> loading frozen model for worker\")\n detection_graph, sess = detector_utils.load_inference_graph()\n sess = tf.Session(graph=detection_graph)\n\n logger.info(\">> loading keras model for worker\")\n try:\n model, classification_graph, session = classifier.load_KerasGraph(\"cnn/models/handposes_vgg64_v1.h5\")\n except Exception as e:\n logger.error(e)\n\n while True:\n #print(\"> ===== in worker loop, frame \", frame_processed)\n frame = input_q.get()\n if (frame is not None):\n # Actual detection. 
Variable boxes contains the bounding box cordinates for hands detected,\n # while scores contains the confidence for each of these boxes.\n # Hint: If len(boxes) > 1 , you may assume you have found atleast one hand (within your score threshold)\n boxes, scores = detector_utils.detect_objects(\n frame, detection_graph, sess)\n\n # get region of interest\n res = detector_utils.get_box_image(cap_params['num_hands_detect'], cap_params[\"score_thresh\"],\n scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)\n \n # draw bounding boxes\n detector_utils.draw_box_on_image(cap_params['num_hands_detect'], cap_params[\"score_thresh\"],\n scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)\n \n # classify hand pose\n if res is not None:\n class_res = classifier.classify(model, classification_graph, session, res)\n inferences_q.put(class_res) \n \n # add frame annotated with bounding box to queue\n cropped_output_q.put(res)\n output_q.put(frame)\n frame_processed += 1\n else:\n output_q.put(frame)\n sess.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-src',\n '--source',\n dest='video_source',\n type=int,\n default=0,\n help='Device index of the camera.')\n parser.add_argument(\n '-nhands',\n '--num_hands',\n dest='num_hands',\n type=int,\n default=1,\n help='Max number of hands to detect.')\n parser.add_argument(\n '-fps',\n '--fps',\n dest='fps',\n type=int,\n default=1,\n help='Show FPS on detection/display visualization')\n parser.add_argument(\n '-wd',\n '--width',\n dest='width',\n type=int,\n default=300,\n help='Width of the frames in the video stream.')\n parser.add_argument(\n '-ht',\n '--height',\n dest='height',\n type=int,\n default=200,\n help='Height of the frames in the video stream.')\n parser.add_argument(\n '-ds',\n '--display',\n dest='display',\n type=int,\n default=1,\n help='Display the detected images using OpenCV. 
This reduces FPS')\n parser.add_argument(\n '-num-w',\n '--num-workers',\n dest='num_workers',\n type=int,\n default=4,\n help='Number of workers.')\n parser.add_argument(\n '-q-size',\n '--queue-size',\n dest='queue_size',\n type=int,\n default=5,\n help='Size of the queue.')\n args = parser.parse_args()\n\n input_q = Queue(maxsize=args.queue_size)\n output_q = Queue(maxsize=args.queue_size)\n cropped_output_q = Queue(maxsize=args.queue_size)\n inferences_q = Queue(maxsize=args.queue_size)\n\n video_capture = WebcamVideoStream(\n src=args.video_source, width=args.width, height=args.height).start()\n\n cap_params = {}\n frame_processed = 0\n cap_params['im_width'], cap_params['im_height'] = video_capture.size()\n cap_params['score_thresh'] = score_thresh\n\n logger.info(f\"im_width={cap_params['im_width']}, im_height={cap_params['im_height']}\")\n\n # max number of hands we want to detect/track\n cap_params['num_hands_detect'] = args.num_hands\n\n logger.info(args)\n logger.info(cap_params)\n \n # Count number of files to increment new example directory\n poses = []\n _file = open(\"poses.txt\", \"r\") \n lines = _file.readlines()\n for line in lines:\n line = line.strip()\n if(line != \"\"):\n print(line)\n poses.append(line)\n\n logger.info(poses) \n\n\n # spin up workers to paralleize detection.\n pool = Pool(args.num_workers, worker,\n (input_q, output_q, cropped_output_q, inferences_q, cap_params, frame_processed))\n\n start_time = datetime.datetime.now()\n num_frames = 0\n fps = 0\n index = 0\n\n # cv2.namedWindow('Handpose', cv2.WINDOW_NORMAL)\n cv2.namedWindow('Handpose', 0)\n cv2.resizeWindow('Handpose', 640, 360)\n\n\n switch = ModeSwitch()\n ptab = PTabController()\n light = LightController()\n\n switch_duration = 1.2\n waiting_duration = 0.2\n recognition_duration = 1.0\n\n # used for mode switching\n thumb_beginning_time = None\n fist_beginning_time = None\n\n no_inference_begining_time = None\n\n pose_buf = []\n time_buf = []\n\n while True:\n frame = 
video_capture.read()\n frame = cv2.flip(frame, 1)\n index += 1\n\n input_q.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n output_frame = output_q.get()\n cropped_output = cropped_output_q.get()\n\n inferences = None\n\n try:\n inferences = inferences_q.get_nowait() \n except Exception as e:\n pass \n\n elapsed_time = (datetime.datetime.now() - start_time).total_seconds()\n num_frames += 1\n fps = num_frames / elapsed_time\n\n\n if inferences is None:\n logger.debug('No hand detected')\n \n if no_inference_begining_time is None:\n no_inference_begining_time = time.time()\n \n if time.time() - no_inference_begining_time > waiting_duration:\n # None of Ptab and Light\n if switch.mode_ptab:\n thumb_beginning_time = None\n\n if switch.mode_ptab:\n fist_beginning_time = None\n\n # control PTab\n # Pause ptab moving if no request for a long time\n if switch.mode_ptab:\n if time.time() - ptab.last_request_time > waiting_duration:\n logger.debug('No request in the last waiting time')\n logger.debug(f'PTab status: move-in={ptab.in_movein}, move-out={ptab.in_moveout}, tohome={ptab.in_tohome}')\n if ptab.in_movein or ptab.in_moveout:\n logger.info('Pause Ptab if it is not on the way home')\n ptab.pause()\n\n if switch.mode_light:\n if time.time() - light.last_request_time > waiting_duration:\n logger.debug('No request in the last waiting time')\n logger.debug(f'Light status: brightering={light.in_brightering}, darking={light.in_darkering}')\n if light.in_brightering or light.in_darkering:\n logger.info('Pause light brightering or darking')\n light.pause() \n\n\n # Display inferences\n if(inferences is not None):\n logger.debug(inferences)\n\n no_inference_begining_time = None\n\n t = time.time()\n p = np.argmax(inferences)\n\n time_buf.insert(0, t)\n pose_buf.insert(0, p)\n\n # remove the data which is not in the time window of recognition\n for i in np.arange(len(time_buf)-1):\n if time_buf[0] - time_buf[-1] > recognition_duration:\n time_buf.pop()\n pose_buf.pop()\n\n if 
len(pose_buf) > 5:\n from collections import Counter\n c = Counter(pose_buf)\n most_common_pose, detect_times = c.most_common(1)[0]\n logger.info(f'Pose {poses[most_common_pose]} happens {detect_times} / {len(pose_buf)}') \n\n # check firstly if switching mode needed\n if most_common_pose == 2:\n if not fist_beginning_time:\n fist_beginning_time = time.time()\n thumb_beginning_time = None\n\n elif most_common_pose == 4:\n if not thumb_beginning_time:\n thumb_beginning_time = time.time()\n fist_beginning_time = None\n\n else: \n thumb_beginning_time = None\n fist_beginning_time = None\n\n\n # pose left\n if most_common_pose == 0:\n if switch.mode_ptab:\n ptab.move_in()\n if switch.mode_light:\n light.darker() \n\n # pose right\n elif most_common_pose == 1:\n if switch.mode_ptab:\n ptab.move_out()\n if switch.mode_light:\n light.brighter() \n\n # pose fist, can switch to light mode\n elif most_common_pose == 2:\n # mode switch?\n if not switch.mode_light:\n if time.time() - fist_beginning_time > switch_duration:\n switch.set_light()\n\n # pose palm\n elif most_common_pose == 3:\n # mode PAUSE: \n if switch.mode_ptab:\n ptab.pause()\n\n # pose thumb, can switch to ptab mode\n elif most_common_pose == 4:\n # mode switch?\n if not switch.mode_ptab:\n if time.time() - thumb_beginning_time > switch_duration:\n switch.set_ptab()\n \n # MOVE HOME: \n if switch.mode_ptab:\n ptab.to_home()\n\n else:\n # Pause PTab except in the case of ToHome\n if switch.mode_ptab:\n if not ptab.in_tohome:\n ptab.pause()\n\n if switch.mode_light:\n light.pause() \n\n\n gui.drawInferences(inferences, poses)\n\n if (cropped_output is not None):\n cropped_output = cv2.cvtColor(cropped_output, cv2.COLOR_RGB2BGR)\n if (args.display > 0):\n cv2.namedWindow('Cropped', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('Cropped', 450, 300)\n cv2.imshow('Cropped', cropped_output)\n #cv2.imwrite('image_' + str(num_frames) + '.png', cropped_output)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n if 
(num_frames == 400):\n num_frames = 0\n start_time = datetime.datetime.now()\n else:\n logger.info(f'frames processed: {index} elapsed time: {elapsed_time}, fps: {str(int(fps))}')\n\n \n # print(\"frame \", index, num_frames, elapsed_time, fps)\n\n if (output_frame is not None):\n output_frame = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)\n if (args.display > 0):\n if (args.fps > 0):\n detector_utils.draw_fps_on_image(\"FPS : \" + str(int(fps)),\n output_frame)\n cv2.imshow('Handpose', output_frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n if (num_frames == 400):\n num_frames = 0\n start_time = datetime.datetime.now()\n else:\n logger.info(f'frames processed: {index} elapsed time: {elapsed_time}, fps: {str(int(fps))}')\n else:\n logger.info(\"video end\")\n break\n elapsed_time = (datetime.datetime.now() - start_time).total_seconds()\n fps = num_frames / elapsed_time\n logger.info(f'fps: {fps}')\n pool.terminate()\n video_capture.stop()\n cv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.5310386419296265, "alphanum_fraction": 0.5835748910903931, "avg_line_length": 32.3870964050293, "blob_id": "54a9379a4c51a2d678b2138fa6985336c709db82", "content_id": "fb623b0242bbae1bb1d558962436297f4a01a9b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8454, "license_type": "permissive", "max_line_length": 87, "num_lines": 248, "path": "/cnn/cnn.py", "repo_name": "kingaza/HandPose", "src_encoding": "UTF-8", "text": "import keras\nfrom keras.datasets import mnist\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Dense, Dropout, Flatten, BatchNormalization, Activation\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\n\n# only useful for working with matplotlib on OSX, \n# which is not a framework build of Python\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport 
os,sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport buildPosesDataset as dataset\n\n\ndef SimpleCNN(input_shape, n_class):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(n_class, activation='softmax'))\n\n # categorical ce since we have multiple classes (10)\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(lr=0.01),\n metrics=['accuracy']) \n\n return model\n\n\ndef KerasVGG(input_shape, n_class):\n \"\"\"\n ๆจกๅž‹้‡‡็”จ็ฑปไผผไบŽ VGG16 ็š„็ป“ๆž„๏ผš\n ไฝฟ็”จๅ›บๅฎšๅฐบๅฏธ็š„ๅฐๅท็งฏๆ ธ (3x3)\n ไปฅ2็š„ๅน‚ๆฌก้€’ๅขž็š„ๅท็งฏๆ ธๆ•ฐ้‡ (64, 128, 256)\n ไธคๅฑ‚ๅท็งฏๆญ้…ไธ€ๅฑ‚ๆฑ ๅŒ–\n ๅ…จ่ฟžๆŽฅๅฑ‚ๆฒกๆœ‰้‡‡็”จ VGG16 ๅบžๅคง็š„ไธ‰ๅฑ‚็ป“ๆž„๏ผŒ้ฟๅ…่ฟ็ฎ—้‡่ฟ‡ๅคง๏ผŒไป…ไฝฟ็”จ 128 ไธช่Š‚็‚น็š„ๅ•ไธชFC\n ๆƒ้‡ๅˆๅง‹ๅŒ–้‡‡็”จHe Normal\n :return:\n \"\"\"\n name = 'VGG'\n inputs = Input(shape=input_shape)\n net = inputs\n # (32, 32, 3)-->(32, 32, 64)\n net = Conv2D(filters=16, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (32, 32, 64)-->(32, 32, 64)\n net = Conv2D(filters=16, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (32, 32, 64)-->(16, 16, 64)\n net = MaxPooling2D(pool_size=2, strides=2, padding='valid')(net)\n\n # (16, 16, 64)-->(16, 16, 128)\n net = Conv2D(filters=32, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (16, 16, 64)-->(16, 16, 128)\n net = Conv2D(filters=32, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (16, 16, 128)-->(8, 8, 128)\n net = MaxPooling2D(pool_size=2, 
strides=2, padding='valid')(net)\n\n # (8, 8, 128)-->(8, 8, 256)\n net = Conv2D(filters=64, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (8, 8, 256)-->(8, 8, 256)\n net = Conv2D(filters=64, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n # (8, 8, 256)-->(4, 4, 256)\n net = MaxPooling2D(pool_size=2, strides=2, padding='valid')(net)\n\n # (4, 4, 256) --> 4*4*256=4096\n net = Flatten()(net)\n # 4096 --> 128 or 64??\n net = Dense(units=128, activation='relu',\n kernel_initializer='he_normal')(net)\n # Dropout\n net = Dropout(0.5)(net)\n # 128 --> 10\n net = Dense(units=n_class, activation='softmax',\n kernel_initializer='he_normal')(net)\n return inputs, net, name\n\n\ndef KerasBN(input_shape, n_class):\n \"\"\"\n ๆทปๅŠ batch norm ๅฑ‚\n :return:\n \"\"\"\n name = 'BN'\n inputs = Input(shape=input_shape)\n net = inputs\n\n # (32, 32, 3)-->(32, 32, 64)\n net = Conv2D(filters=16, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (32, 32, 64)-->(32, 32, 64)\n net = Conv2D(filters=16, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (32, 32, 64)-->(16, 16, 64)\n net = MaxPooling2D(pool_size=2, strides=2, padding='valid')(net)\n\n # (16, 16, 64)-->(16, 16, 128)\n net = Conv2D(filters=32, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (16, 16, 64)-->(16, 16, 128)\n net = Conv2D(filters=32, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (16, 16, 128)-->(8, 8, 128)\n net = 
MaxPooling2D(pool_size=2, strides=2, padding='valid')(net)\n\n # (8, 8, 128)-->(8, 8, 256)\n net = Conv2D(filters=64, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (8, 8, 128)-->(8, 8, 256)\n net = Conv2D(filters=64, kernel_size=3, strides=1,\n padding='same', activation='relu',\n kernel_initializer='he_normal')(net)\n net = BatchNormalization()(net)\n net = Activation('relu')(net)\n # (8, 8, 256)-->(4, 4, 256)\n net = MaxPooling2D(pool_size=2, strides=2, padding='valid')(net)\n\n # (4, 4, 256) --> 4*4*256=4096\n net = Flatten()(net)\n # 4096 --> 128 or 64 ??\n net = Dense(units=128, activation='relu',\n kernel_initializer='he_normal')(net)\n # Dropout\n net = Dropout(0.5)(net)\n # 128 --> 10\n net = Dense(units=n_class, activation='softmax',\n kernel_initializer='he_normal')(net)\n\n return inputs, net, name\n\n\n\ndef train():\n batch_size = 128\n epochs = 10\n model_name = \"cnn/models/handposes_vgg64_v1.h5\"\n\n # input image dimensions\n img_rows, img_cols = 64, 64\n\n # the data, shuffled and split between train and test sets\n x_train, y_train, x_test, y_test = dataset.load_data(poses=[\"all\"], im_size=64)\n\n num_classes = len(np.unique(y_test))\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, 
num_classes)\n\n inputs, logits, name = KerasVGG(input_shape, num_classes)\n model = Model(inputs=inputs, outputs=logits, name=name)\n model.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n #model = SimpleCNN(input_shape, num_classes)\n\n\n\n model.summary() \n\n ####### TRAINING #######\n hist = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n # Evaluation\n score = model.evaluate(x_test, y_test, verbose=1)\n\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n model.save(model_name)\n\n # plotting the metrics\n plt.figure()\n plt.subplot(2,1,1)\n plt.plot(hist.history['acc'])\n plt.plot(hist.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='lower right')\n\n plt.subplot(2,1,2)\n plt.plot(hist.history['loss'])\n plt.plot(hist.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper right')\n\n plt.tight_layout()\n plt.show()\n\nif __name__ == \"__main__\":\n train()\n" } ]
4
gastlygem/lpthw-cn
https://github.com/gastlygem/lpthw-cn
d5971ae8361777c50e9e3681f0dac3383b52ae48
46c80c420e7d47a6ea5c87cf67a302c03d4ace91
cc5792847641cfd1e17bcc6308a84ef725e85059
refs/heads/master
2016-09-08T07:14:48.235669
2016-07-13T07:24:44
2016-07-13T07:24:44
3,645,791
5
2
null
null
null
null
null
[ { "alpha_fraction": 0.6983643770217896, "alphanum_fraction": 0.7036896347999573, "avg_line_length": 43.54237365722656, "blob_id": "56617f2f7f3e1bf123d382a5cdddb3effe0e83fd", "content_id": "09ad36958a4abb7e6a85b040f2678b135c56080e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2629, "license_type": "no_license", "max_line_length": 80, "num_lines": 59, "path": "/ex21.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 21: Functions Can Return Something\n*******************************************\n\nYou have been using the ``=`` character to name variables and set them\nto numbers or strings. We're now going to blow your mind again by\nshowing you how to use ``=`` and a new Python word ``return`` to \nset variables to be a *value from a function*. There will be one\nthing to pay close attention to, but first type this in:\n\n\n.. literalinclude:: ex/ex21.py\n :linenos:\n\nWe are now doing our own math functions for ``add``, ``subtract``, ``multiply``,\nand ``divide``. The important thing to notice is the last line where we\nsay ``return a + b`` (in ``add``). What this does is the following:\n\n1. Our function is called with two arguments: ``a`` and ``b``.\n2. We print out what our function is doing, in this case \"ADDING\".\n3. Then we tell Python to do something kind of backward: we return\n the addition of ``a + b``. You might say this as, \"I add ``a`` and ``b``\n then return them.\"\n4. Python adds the two numbers. Then when the function ends any line\n that runs it will be able to assign this ``a + b`` result to a variable.\n\nAs with many other things in this book, you should take this real slow,\nbreak it down and try to trace what's going on. To help there's extra\ncredit to get you to solve a puzzle and learn something cool.\n\n\nWhat You Should See\n===================\n\n\n.. 
literalinclude:: ex/ex21.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. If you aren't really sure what ``return`` does, try writing a few of\n your own functions and have them return some values. You can return\n anything that you can put to the right of an ``=``.\n2. At the end of the script is a puzzle. I'm taking the return value\n of one function, and *using* it as the argument of another function.\n I'm doing this in a chain so that I'm kind of creating a formula using\n the functions. It looks really weird, but if you run the script you \n can see the results. What you should do is try to figure out the\n normal formula that would recreate this same set of operations.\n3. Once you have the formula worked out for the puzzle, get in there\n and see what happens when you modify the parts of the functions. \n Try to change it on purpose to make another value.\n4. Finally, do the inverse. Write out a simple formula and use the \n functions in the same way to calculate it.\n\nThis exercise might really whack your brain out, but take it slow and easy and\ntreat it like a little game. Figuring out puzzles like this is what makes\nprogramming fun, so I'll be giving you more little problems like this as we go.\n\n" }, { "alpha_fraction": 0.7704179883003235, "alphanum_fraction": 0.7723472714424133, "avg_line_length": 52.55172348022461, "blob_id": "a83edee8f5e59a26fc79bdca3952ad1ca3c6fecc", "content_id": "6d139fa291637f22b470e18b3cb51d9633b3637e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1555, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/next.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Next Steps\n**********\n\nYou're not a programmer quite yet. I like to think of this book as giving you\nyour \"programming brown belt\". You know enough to start another\nbook on programming and handle it just fine. 
This book should have given you\nthe mental tools and attitude you need to go through most Python books and\nactually learn something. It might even make it easy.\n\nI recommend you continue with http://www.djangobook.com/ and start going\nthrough the 2nd Edition of *The Django Book*. Even if you never plan on doing\nPython web programming, going through the book will cement your skills in\nPython using an actual practical activity. It is also a better framework than\nthe ``lpthw.web`` you were using, but all of the concepts you've learned so far\napply to the ``Django`` web framework. Just take your time, ask questions, and\nyou'll get through it.\n\nYou could probably start hacking away at some programs right now, and if you\nhave that itch, go ahead. Just understand anything you write will probably\nsuck. That's alright though, I suck at every programming language I first\nstart using. Nobody writes pure perfect gold when they're a beginner, and\nanyone who tells you they did is a huge liar.\n\nFinally, remember that this is something you have to do at least a couple hours\na night for a while before you can get good. If it helps, while you're\nstruggling to learn Python every night, I'm hard at work learning to play\nguitar. 
I work at it about 2 or 4 hours a day and still practice scales.\n\nEveryone is a beginner at something.\n\n\n" }, { "alpha_fraction": 0.6235842704772949, "alphanum_fraction": 0.6322451829910278, "avg_line_length": 22.4375, "blob_id": "3374411017f993f1d251d90f911a7bd36dd0b6f1", "content_id": "b7580a267b03a7a7a4a4145eadd37b28f24d7261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5300, "license_type": "no_license", "max_line_length": 76, "num_lines": 128, "path": "/cn/ex49.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 49: ๅˆ›ๅปบๅฅๅญ\n*****************************\n\nไปŽๆˆ‘ไปฌ่ฟ™ไธชๅฐๆธธๆˆ็š„่ฏๆฑ‡ๆ‰ซๆๅ™จไธญ๏ผŒๆˆ‘ไปฌๅบ”่ฏฅๅฏไปฅๅพ—ๅˆฐ็ฑปไผผไธ‹้ข็š„ๅˆ—่กจ๏ผš\n\n.. code-block:: pycon\n\n >>> from ex48 import lexicon\n >>> print lexicon.scan(\"go north\")\n [('verb', 'go'), ('direction', 'north')]\n >>> print lexicon.scan(\"kill the princess\")\n [('verb', 'kill'), ('stop', 'the'), ('noun', 'princess')]\n >>> print lexicon.scan(\"eat the bear\")\n [('verb', 'eat'), ('stop', 'the'), ('noun', 'bear')]\n >>> print lexicon.scan(\"open the door and smack the bear in the nose\")\n [('error', 'open'), ('stop', 'the'), ('noun', 'door'), ('error', 'and'),\n ('error', 'smack'), ('stop', 'the'), ('noun', 'bear'), ('stop', 'in'),\n ('stop', 'the'), ('error', 'nose')]\n >>> \n\n็Žฐๅœจ่ฎฉๆˆ‘ไปฌๆŠŠๅฎƒ่ฝฌๅŒ–ๆˆๆธธๆˆๅฏไปฅไฝฟ็”จ็š„ไธœ่ฅฟ๏ผŒไนŸๅฐฑๆ˜ฏไธ€ไธช Sentence ็ฑปใ€‚\n\nๅฆ‚ๆžœไฝ ่ฟ˜่ฎฐๅพ—ๅญฆๆ กๅญฆ่ฟ‡็š„ไธœ่ฅฟ็š„่ฏ๏ผŒไธ€ไธชๅฅๅญๆ˜ฏ็”ฑ่ฟ™ๆ ท็š„็ป“ๆž„็ป„ๆˆ็š„๏ผš\n\n ไธป่ฏญ(Subject) + ่ฐ“่ฏญ(ๅŠจ่ฏ Verb) + ๅฎพ่ฏญ(Object)\n\nๅพˆๆ˜พ็„ถๅฎž้™…็š„ๅฅๅญๅฏ่ƒฝไผšๆฏ”่ฟ™ๅคๆ‚๏ผŒ่€Œไฝ ๅฏ่ƒฝๅทฒ็ปๅœจ่‹ฑ่ฏญ็š„่ฏญๆณ•่ฏพไธŠ้ข่ขซๆŠ˜่…พๅพ—ๅคŸๅ‘›ไบ†ใ€‚\\\nๆˆ‘ไปฌ็š„็›ฎ็š„๏ผŒๆ˜ฏๅฐ†ไธŠ้ข็š„ๅ…ƒ็ป„ๅˆ—่กจ่ฝฌๆขไธบไธ€ไธช Sentence ๅฏน่ฑก๏ผŒ่€Œ่ฟ™ไธชๅฏน่ฑกๅˆๅŒ…ๅซไธป่ฐ“ๅฎพ\\\nๅ„ไธชๆˆๅ‘˜ใ€‚\n\nๅŒน้…(Match)ๅ’Œ็ชฅ่ง†(Peek)\n==========================\n\nไธบไบ†่พพๅˆฐ่ฟ™ไธชๆ•ˆๆžœ๏ผŒไฝ ้œ€่ฆๅ››ๆ 
ทๅทฅๅ…ท๏ผš\n\n1. ๅพช็Žฏ่ฎฟ้—ฎๅ…ƒ็ป„ๅˆ—่กจ็š„ๆ–นๆณ•๏ผŒ่ฟ™ๆŒบ็ฎ€ๅ•็š„ใ€‚\n2. ๅŒน้…ๆˆ‘ไปฌ็š„ไธป่ฐ“ๅฎพ่ฎพ็ฝฎไธญไธๅŒ็ง็ฑปๅ…ƒ็ป„็š„ๆ–นๆณ•ใ€‚\n3. ไธ€ไธชโ€œ็ชฅ่ง†โ€ๆฝœๅœจๅ…ƒ็ป„็š„ๆ–นๆณ•๏ผŒไปฅไพฟๅšๅ†ณๅฎšๆ—ถ็”จๅˆฐใ€‚\n4. ่ทณ่ฟ‡(skip)ๆˆ‘ไปฌไธๅœจไนŽ็š„ๅ†…ๅฎน็š„ๆ–นๆณ•๏ผŒไพ‹ๅฆ‚ๅฝขๅฎน่ฏใ€ๅ† ่ฏ็ญ‰ๆฒกๆœ‰็”จๅค„็š„่ฏๆฑ‡ใ€‚\n\nๆˆ‘ไปฌไฝฟ็”จ peek ๅ‡ฝๆ•ฐๆฅๆŸฅ็œ‹ๅ…ƒ็ป„ๅˆ—่กจไธญ็š„ไธ‹ไธ€ไธชๆˆๅ‘˜๏ผŒๅšๅŒน้…ไปฅๅŽๅ†ๅฏนๅฎƒๅšไธ‹ไธ€ๆญฅๅŠจไฝœใ€‚\\\n่ฎฉๆˆ‘ไปฌๅ…ˆ็œ‹็œ‹่ฟ™ไธช peek ๅ‡ฝๆ•ฐ๏ผš\n\n.. code-block:: python\n\n def peek(word_list):\n if word_list:\n word = word_list[0]\n return word[0]\n else:\n return None\n \nๅพˆ็ฎ€ๅ•ใ€‚ๅ†็œ‹็œ‹ match ๅ‡ฝๆ•ฐ๏ผš\n\n.. code-block:: python\n\n def match(word_list, expecting):\n if word_list:\n word = word_list.pop(0)\n\n if word[0] == expecting:\n return word\n else:\n return None\n else:\n return None\n\n่ฟ˜ๆ˜ฏๅพˆ็ฎ€ๅ•๏ผŒๆœ€ๅŽๆˆ‘ไปฌ็œ‹็œ‹ skip ๅ‡ฝๆ•ฐ:\n\n.. code-block:: python\n\n def skip(word_list, word_type):\n while peek(word_list) == word_type:\n match(word_list, word_type)\n\nไปฅไฝ ็Žฐๅœจ็š„ๆฐดๅนณ๏ผŒไฝ ๅบ”่ฏฅๅฏไปฅ็œ‹ๅ‡บๅฎƒไปฌ็š„ๅŠŸ่ƒฝๆฅใ€‚็กฎ่ฎค่‡ชๅทฑ็œŸ็š„ๅผ„ๆ‡‚ไบ†ๅฎƒไปฌใ€‚\n\n\nๅฅๅญ็š„่ฏญๆณ•\n====================\n\nๆœ‰ไบ†ๅทฅๅ…ท๏ผŒๆˆ‘ไปฌ็ŽฐๅœจๅฏไปฅไปŽๅ…ƒ็ป„ๅˆ—่กจๆฅๆž„ๅปบๅฅๅญ(Sentence)ๅฏน่ฑกไบ†ใ€‚ๆˆ‘ไปฌ็š„ๅค„็†ๆต็จ‹ๅฆ‚ไธ‹๏ผš\n\n1. ไฝฟ็”จ ``peek`` ่ฏ†ๅˆซไธ‹ไธ€ไธชๅ•่ฏใ€‚\n2. ๅฆ‚ๆžœ่ฟ™ไธชๅ•่ฏๅ’Œๆˆ‘ไปฌ็š„่ฏญๆณ•ๅŒน้…๏ผŒๆˆ‘ไปฌๅฐฑ่ฐƒ็”จไธ€ไธชๅ‡ฝๆ•ฐๆฅๅค„็†่ฟ™้ƒจๅˆ†่ฏญๆณ•ใ€‚ๅ‡่ฎพๅ‡ฝๆ•ฐ็š„ๅๅญ—\\\n ๅซ ``parse_subject`` ๅฅฝไบ†ใ€‚\n3. ๅฆ‚ๆžœ่ฏญๆณ•ไธๅŒน้…๏ผŒๆˆ‘ไปฌๅฐฑ ``raise`` ไธ€ไธช้”™่ฏฏ๏ผŒๆŽฅไธ‹ๆฅไฝ ไผšๅญฆๅˆฐ่ฟ™ๆ–น้ข็š„ๅ†…ๅฎนใ€‚\n4. 
ๅ…จ้ƒจๅˆ†ๆžๅฎŒไปฅๅŽ๏ผŒๆˆ‘ไปฌๅบ”่ฏฅ่ƒฝๅพ—ๅˆฐไธ€ไธช Sentence ๅฏน่ฑก๏ผŒ็„ถๅŽๅฏไปฅๅฐ†ๅ…ถๅบ”็”จๅœจๆˆ‘ไปฌ็š„ๆธธๆˆไธญใ€‚\n\nๆผ”็คบ่ฟ™ไธช่ฟ‡็จ‹ๆœ€็ฎ€ๅ•็š„ๆ–นๆณ•ๆ˜ฏๆŠŠไปฃ็ ๅฑ•็คบ็ป™ไฝ ่ฎฉไฝ ้˜…่ฏป๏ผŒไธ่ฟ‡่ฟ™่Š‚ไน ้ข˜ๆœ‰ไธชไธไธ€ๆ ท็š„่ฆๆฑ‚๏ผŒๅ‰้ข\\\nๆ˜ฏๆˆ‘็ป™ไฝ ๆต‹่ฏ•ไปฃ็ ๏ผŒไฝ ็…ง็€ๅ†™ๅ‡บ็จ‹ๅบๆฅ๏ผŒ่€Œ่ฟ™ๆฌกๆ˜ฏๆˆ‘็ป™ไฝ ็š„็จ‹ๅบ๏ผŒ่€Œไฝ ่ฆไธบๅฎƒๅ†™ๅ‡บๆต‹่ฏ•ไปฃ็ ๆฅใ€‚\n\nไปฅไธ‹ๅฐฑๆ˜ฏๆˆ‘ๅ†™็š„็”จๆฅ่งฃๆž็ฎ€ๅ•ๅฅๅญ็š„ไปฃ็ ๏ผŒๅฎƒไฝฟ็”จไบ† ``ex48.lexicon`` ่ฟ™ไธชๆจก็ป„ใ€‚\n\n.. literalinclude:: ex/ex49.py\n\nๅ…ณไบŽๅผ‚ๅธธ(Exception)\n====================\n\nไฝ ๅทฒ็ป็ฎ€ๅ•ๅญฆ่ฟ‡ๅ…ณไบŽๅผ‚ๅธธ็š„ไธ€ไบ›ไธœ่ฅฟ๏ผŒไฝ†่ฟ˜ๆฒกๅญฆ่ฟ‡ๆ€Žๆ ทๆŠ›ๅ‡บ(raise)ๅฎƒไปฌใ€‚่ฟ™่Š‚็š„ไปฃ็ ๆผ”็คบไบ†\\\nๅฆ‚ไฝ• raiseใ€‚้ฆ–ๅ…ˆๅœจๆœ€ๅ‰้ข๏ผŒไฝ ่ฆๅฎšไน‰ๅฅฝ ``ParserException`` ่ฟ™ไธช็ฑป๏ผŒ่€Œๅฎƒๅˆๆ˜ฏ ``Exception``\n็š„ไธ€็งใ€‚ๅฆๅค–่ฆๆณจๆ„ๆˆ‘ไปฌๆ˜ฏๆ€Žๆ ทไฝฟ็”จ ``raise`` ่ฟ™ไธชๅ…ณ้”ฎๅญ—ๆฅๆŠ›ๅ‡บๅผ‚ๅธธ็š„ใ€‚\n\nไฝ ็š„ๆต‹่ฏ•ไปฃ็ ๅบ”่ฏฅไนŸ่ฆๆต‹่ฏ•ๅˆฐ่ฟ™ไบ›ๅผ‚ๅธธ๏ผŒ่ฟ™ไธชๆˆ‘ไนŸไผšๆผ”็คบ็ป™ไฝ ๅฆ‚ไฝ•ๅฎž็Žฐใ€‚\n\n\nไฝ ๅบ”่ฏฅๆต‹่ฏ•็š„ไธœ่ฅฟ\n====================\n\nไธบใ€Šไน ้ข˜ 49ใ€‹ๅ†™ไธ€ไธชๅฎŒๆ•ด็š„ๆต‹่ฏ•ๆ–นๆกˆ๏ผŒ็กฎ่ฎคไปฃ็ ไธญๆ‰€ๆœ‰็š„ไธœ่ฅฟ้ƒฝ่ƒฝๆญฃๅธธๅทฅไฝœ๏ผŒๅ…ถไธญๅผ‚ๅธธ็š„ๆต‹่ฏ•โ€”โ€”\\\n่พ“ๅ…ฅไธ€ไธช้”™่ฏฏ็š„ๅฅๅญๅฎƒไผšๆŠ›ๅ‡บไธ€ไธชๅผ‚ๅธธๆฅใ€‚\n\nไฝฟ็”จ ``assert_raises`` ่ฟ™ไธชๅ‡ฝๆ•ฐๆฅๆฃ€ๆŸฅๅผ‚ๅธธ๏ผŒๅœจ nose ็š„ๆ–‡ๆกฃ้‡ŒๆŸฅ็œ‹็›ธๅ…ณ็š„ๅ†…ๅฎน๏ผŒๅญฆ็€ไฝฟ็”จๅฎƒ\\\nๅ†™้’ˆๅฏนโ€œๆ‰ง่กŒๅคฑ่ดฅโ€็š„ๆต‹่ฏ•๏ผŒ่ฟ™ไนŸๆ˜ฏๆต‹่ฏ•ๅพˆ้‡่ฆ็š„ไธ€ไธชๆ–น้ขใ€‚ไปŽ nose ๆ–‡ๆกฃไธญๅญฆไผšไฝฟ็”จ \n``assert_raises``\\๏ผŒไปฅๅŠไธ€ไบ›ๅˆซ็š„ๅ‡ฝๆ•ฐใ€‚\n\nๅ†™ๅฎŒๆต‹่ฏ•ไปฅๅŽ๏ผŒไฝ ๅบ”่ฏฅๅฐฑๆ˜Ž็™ฝไบ†่ฟ™ๆฎต็จ‹ๅบ็š„ๅทฅไฝœๅŽŸ็†๏ผŒ่€Œไธ”ไนŸๅญฆไผšไบ†ๅฆ‚ไฝ•ไธบๅˆซไบบ็š„็จ‹ๅบๅ†™ๆต‹่ฏ•ไปฃ็ ใ€‚\n็›ธไฟกๆˆ‘๏ผŒ่ฟ™ๆ˜ฏไธ€ไธช้žๅธธๆœ‰็”จ็š„ๆŠ€่ƒฝใ€‚\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ไฟฎๆ”น ``parse_`` ๅ‡ฝๆ•ฐ๏ผˆๆ–นๆณ•๏ผ‰๏ผŒๅฐ†ๅฎƒไปฌๆ”พๅˆฐไธ€ไธช็ฑป้‡Œ่พน๏ผŒ่€Œไธไป…ไป…ๆ˜ฏ็‹ฌ็ซ‹็š„ๆ–นๆณ•ๅ‡ฝๆ•ฐใ€‚่ฟ™ไธค็ง\\\n ็จ‹ๅบ่ฎพ่ฎกไฝ ๅ–œๆฌขๅ“ชไธ€็งๅ‘ข๏ผŸ\n2. 
ๆ้ซ˜ parser ๅฏนไบŽ้”™่ฏฏ่พ“ๅ…ฅ็š„ๆŠตๅพก่ƒฝๅŠ›๏ผŒ่ฟ™ๆ ทๅณไฝฟ็”จๆˆท่พ“ๅ…ฅไบ†ไฝ ้ข„ๅฎšไน‰่ฏญๆฑ‡ไน‹ๅค–็š„่ฏ่ฏญ๏ผŒไฝ ็š„็จ‹ๅบ\\\n ไนŸ่ƒฝๆญฃๅธธ่ฟ่กŒไธ‹ๅŽปใ€‚\n3. ๆ”น่ฟ›่ฏญๆณ•๏ผŒ่ฎฉๅฎƒๅฏไปฅๅค„็†ๆ›ดๅคš็š„ไธœ่ฅฟ๏ผŒไพ‹ๅฆ‚ๆ•ฐๅญ—ใ€‚\n4. ๆƒณๆƒณๅœจๆธธๆˆ้‡Œไฝ ็š„ Sentence ็ฑปๅฏไปฅๅฏน็”จๆˆท่พ“ๅ…ฅๅšๅ“ชไบ›ๆœ‰่ถฃ็š„ไบ‹ๆƒ…ใ€‚\n\n\n" }, { "alpha_fraction": 0.6784558892250061, "alphanum_fraction": 0.6890230774879456, "avg_line_length": 49.92307662963867, "blob_id": "1d1974feacfdf63429a735db347241706d5fff3f", "content_id": "ef1544b902c755346a2a210901c0f30ed1b45fdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4637, "license_type": "no_license", "max_line_length": 133, "num_lines": 91, "path": "/ex39.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 39: Doing Things To Lists\n**********************************\n\nYou have learned about lists. When you learned about ``while-loops`` you\n\"appended\" numbers to the end of a list and printed them out. There was also\nextra credit where you were supposed to find all the other things you can do to\nlists in the Python documentation. That was a while back, so go find in the\nbook where you did that and review if you do not know what I'm talking about.\n\nFound it? Remember it? Good. When you did this you had a list, and you \"called\" the\nfunction ``append`` on it. However, you may not really understand what's going on so let's see what\nwe can do to lists.\n\nWhen you type Python code that reads ``mystuff.append('hello')`` you are actually\nsetting off a chain of events inside Python to cause something to happen to the\n``mystuff`` list. Here's how it works:\n\n1. Python sees you mentioned ``mystuff`` and looks up that variable. It might have to\n look backwards to see if you created with ``=``, look and see if it is a function\n argument, or maybe it's a global variable. Either way it has to find the ``mystuff``\n first.\n2. 
Once it finds ``mystuff`` it then hits the ``.`` (period) operator and starts to look\n at *variables* that are a part of ``mystuff``. Since ``mystuff`` is a list, it knows\n that ``mystuff`` has a bunch of functions.\n3. It then hits ``append`` and compares the name \"append\" to all the ones that ``mystuff``\n says it owns. If append is in there (it is) then it grabs *that* to use.\n4. Next Python sees the ``(`` (parenthesis) and realizes, \"Oh hey, this should be a function.\"\n At this point it *calls* (aka runs, executes) the function just like normally, but instead it calls the function\n with an *extra* argument.\n5. That *extra* argument is ... ``mystuff``! I know, weird right? But that's how Python\n works so it's best to just remember it and assume that's alright. What happens then, at \n the end of all this is a function call that looks like: ``append(mystuff, 'hello')``\n instead of what you read which is ``mystuff.append('hello')``.\n\n\nFor the most part you do not have to know that this is going on, but it helps when you\nget error messages from python like this:\n\n.. code-block:: pycon\n\n $ python\n Python 2.6.5 (r265:79063, Apr 16 2010, 13:57:41) \n [GCC 4.4.3] on linux2\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> class Thing(object):\n ... def test(hi):\n ... print \"hi\"\n ... \n >>> a = Thing()\n >>> a.test(\"hello\")\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: test() takes exactly 1 argument (2 given)\n >>> \n\nWhat was all that? Well, this is me typing into the Python shell and showing you\nsome magic. You haven't seen ``class`` yet but we'll get into those later. For \nnow you see how Python said ``test() takes exactly 1 argument (2 given)``. 
If\nyou see this it means that python changed ``a.test(\"hello\")`` to ``test(a, \"hello\")``\nand that somewhere someone messed up and didn't add the argument for ``a``.\n\nThat might be a lot to take in, but we're going to spend a few exercises getting this\nconcept firm in your brain. To kick things off, here's an exercise that mixes\nstrings and lists for all kinds of fun.\n\n.. literalinclude:: ex/ex39.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex39.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Take each function that is called, and go through the steps outlined above to translate them to what\n Python does. For example, ``' '.join(things)`` is ``join(' ', things)``.\n2. Translate these two ways to view the function calls in English. For example, ``' '.join(things)``\n reads as, \"Join things with ' ' between them.\" Meanwhile, ``join(' ', things)`` means,\n \"Call join with ' ' and things.\" Understand how they are really the same thing.\n3. Go read about \"Object Oriented Programming\" online. Confused? Yeah I was too. Do not worry.\n You will learn enough to be dangerous, and you can slowly learn more later.\n4. Read up on what a \"class\" is in Python. *Do not read about how other languages use the word \"class\".* That will only mess you up.\n5. What's the relationship between ``dir(something)`` and the \"class\" of ``something``?\n6. If you do not have any idea what I'm talking about do not worry. Programmers like to feel smart\n so they invented Object Oriented Programming, named it OOP, and then used it way too much. 
If you\n think that's hard, you should try to use \"functional programming\".\n\n\n\n" }, { "alpha_fraction": 0.5792951583862305, "alphanum_fraction": 0.6101321578025818, "avg_line_length": 15.777777671813965, "blob_id": "4874ede8c1692a85518a9093bc208510d4af94e1", "content_id": "c60c255bf94a223c3bed23b3b39cf24b0814866a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 872, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/cn/ex29.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 29: ๅฆ‚ๆžœ(if)\n********************\n\nไธ‹้ขๆ˜ฏไฝ ่ฆๅ†™็š„ไฝœไธš๏ผŒ่ฟ™ๆฎตๅ‘ไฝ ไป‹็ปไบ†โ€œif่ฏญๅฅโ€ใ€‚ๆŠŠ่ฟ™ๆฎต่พ“ๅ…ฅ่ฟ›ๅŽป๏ผŒ่ฎฉๅฎƒ่ƒฝๆญฃ็กฎๆ‰ง่กŒใ€‚็„ถๅŽ\\\nๆˆ‘ไปฌ็œ‹็œ‹ไฝ ๆ˜ฏๅฆๆœ‰ๆ‰€ๆ”ถ่Žทใ€‚\n\n.. literalinclude:: ex/ex29.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex29.txt\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n็Œœ็Œœโ€œif่ฏญๅฅโ€ๆ˜ฏไป€ไนˆ๏ผŒๅฎƒๆœ‰ไป€ไนˆ็”จๅค„ใ€‚ๅœจๅšไธ‹ไธ€้“ไน ้ข˜ๅ‰๏ผŒ่ฏ•็€็”จ่‡ชๅทฑ็š„่ฏๅ›ž็ญ”ไธ‹้ข็š„้—ฎ้ข˜:\n\n1. ไฝ ่ฎคไธบ ``if`` ๅฏนไบŽๅฎƒไธ‹ไธ€่กŒ็š„ไปฃ็ ๅšไบ†ไป€ไนˆ๏ผŸ\n2. ไธบไป€ไนˆ ``if`` ่ฏญๅฅ็š„ไธ‹ไธ€่กŒ้œ€่ฆ 4 ไธช็ฉบๆ ผ็š„็ผฉ่ฟ›๏ผŸ\n3. ๅฆ‚ๆžœไธ็ผฉ่ฟ›๏ผŒไผšๅ‘็”Ÿไป€ไนˆไบ‹ๆƒ…๏ผŸ\n4. ๆŠŠไน ้ข˜ 26 ไธญ็š„ๅ…ถๅฎƒๅธƒๅฐ”่กจ่พพๅผๆ”พๅˆฐ``if่ฏญๅฅ``ไธญไผšไธไผšไนŸๅฏไปฅ่ฟ่กŒๅ‘ข๏ผŸ่ฏ•ไธ€ไธ‹ใ€‚\n5. 
ๅฆ‚ๆžœๆŠŠๅ˜้‡ ``people``, ``cats``, ๅ’Œ ``dogs`` ็š„ๅˆๅง‹ๅ€ผๆ”นๆމ๏ผŒไผšๅ‘็”Ÿไป€ไนˆไบ‹ๆƒ…๏ผŸ\n\n" }, { "alpha_fraction": 0.6610338091850281, "alphanum_fraction": 0.6804174780845642, "avg_line_length": 21.595504760742188, "blob_id": "ac8fac94baf82b1b21ec4580ba87c36b328a175a", "content_id": "75e5e94247574e8fedba6641942d30ee25d51e14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3866, "license_type": "no_license", "max_line_length": 65, "num_lines": 89, "path": "/cn/ex13.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 13: ๅ‚ๆ•ฐใ€่งฃๅŒ…ใ€ๅ˜้‡\n*********************************************\n\nๅœจ่ฟ™่Š‚็ปƒไน ไธญ๏ผŒๆˆ‘ไปฌๅฐ†้™ๅˆฐๅฆๅค–ไธ€็งๅฐ†ๅ˜้‡ไผ ้€’็ป™่„šๆœฌ็š„ๆ–นๆณ•(ๆ‰€่ฐ“่„šๆœฌ๏ผŒๅฐฑๆ˜ฏไฝ ๅ†™็š„ ``.py`` \n็จ‹ๅบ)ใ€‚ไฝ ๅทฒ็ป็Ÿฅ้“๏ผŒๅฆ‚ๆžœ่ฆ่ฟ่กŒ ``ex13.py``\\๏ผŒๅช่ฆๅœจๅ‘ฝไปค่กŒ่ฟ่กŒ ``python ex13.py`` ๅฐฑ\\\nๅฏไปฅไบ†ใ€‚่ฟ™ๅฅๅ‘ฝไปคไธญ็š„ ``ex13.py`` ้ƒจๅˆ†ๅฐฑๆ˜ฏๆ‰€่ฐ“็š„โ€œๅ‚ๆ•ฐ(argument)โ€๏ผŒๆˆ‘ไปฌ็Žฐๅœจ่ฆๅš็š„\\\nๅฐฑๆ˜ฏๅ†™ไธ€ไธชๅฏไปฅๆŽฅๅ—ๅ‚ๆ•ฐ็š„่„šๆœฌใ€‚\n\nๅฐ†ไธ‹้ข็š„็จ‹ๅบๅ†™ไธ‹ๆฅ๏ผŒๅŽ้ขไฝ ๅฐ†็œ‹ๅˆฐ่ฏฆ็ป†่งฃ้‡Šใ€‚\n\n.. literalinclude:: ex/ex13.py\n :linenos:\n\nๅœจ็ฌฌ 1 ่กŒๆˆ‘ไปฌๆœ‰ไธ€ไธชโ€œimportโ€่ฏญๅฅ. ่ฟ™ๆ˜ฏไฝ ๅฐ† python ็š„ๅŠŸ่ƒฝๅผ•ๅ…ฅไฝ ็š„่„šๆœฌ็š„ๆ–นๆณ•. 
Python\nไธไผšไธ€ไธ‹ๅญๅฐ†ๅฎƒๆ‰€ๆœ‰็š„ๅŠŸ่ƒฝ็ป™ไฝ ๏ผŒ่€Œๆ˜ฏ่ฎฉไฝ ้œ€่ฆไป€ไนˆๅฐฑ่ฐƒ็”จไป€ไนˆใ€‚่ฟ™ๆ ทๅฏไปฅ่ฎฉไฝ ็š„็จ‹ๅบไฟๆŒ\\\n็ฒพ็ฎ€๏ผŒ่€ŒๅŽ้ข็š„็จ‹ๅบๅ‘˜็œ‹ๅˆฐไฝ ็š„ไปฃ็ ็š„ๆ—ถๅ€™๏ผŒ่ฟ™ไบ›โ€œimportโ€ๅฏไปฅไฝœไธบๆ็คบ๏ผŒ่ฎฉไป–ไปฌๆ˜Ž็™ฝไฝ ็š„\\\nไปฃ็ ็”จๅˆฐไบ†ๅ“ชไบ›ๅŠŸ่ƒฝใ€‚\n\n\n``argv`` ๆ˜ฏๆ‰€่ฐ“็š„โ€œๅ‚ๆ•ฐๅ˜้‡(argument variable)โ€๏ผŒๆ˜ฏไธ€ไธช้žๅธธๆ ‡ๅ‡†็š„็ผ–็จ‹ๆœฏ่ฏญใ€‚ๅœจๅ…ถไป–\\\n็š„็ผ–็จ‹่ฏญ่จ€้‡Œไฝ ไนŸๅฏไปฅ็œ‹ๅˆฐๅฎƒใ€‚่ฟ™ไธชๅ˜้‡ๅŒ…ๅซไบ†ไฝ ไผ ้€’็ป™ Python ็š„ๅ‚ๆ•ฐใ€‚้€š่ฟ‡ๅŽ้ข็š„็ปƒไน \\\nไฝ ๅฐ†ๅฏนๅฎƒๆœ‰ๆ›ดๅคš็š„ไบ†่งฃใ€‚\n\n็ฌฌ 3 ่กŒๅฐ† ``argv`` โ€œ่งฃๅŒ…(unpack)โ€๏ผŒไธŽๅ…ถๅฐ†ๆ‰€ๆœ‰ๅ‚ๆ•ฐๆ”พๅˆฐๅŒไธ€ไธชๅ˜้‡ไธ‹้ข๏ผŒๆˆ‘ไปฌๅฐ†ๆฏไธชๅ‚ๆ•ฐ\\\n่ต‹ไบˆไธ€ไธชๅ˜้‡ๅ๏ผš ``script``, ``first``, ``second``, ไปฅๅŠ ``third``\\ใ€‚่ฟ™ไนŸ่ฎธ็œ‹ไธŠๅŽป\\\nๆœ‰ไบ›ๅฅ‡ๆ€ช, ไธ่ฟ‡\"่งฃๅŒ…\"ๅฏ่ƒฝๆ˜ฏๆœ€ๅฅฝ็š„ๆ่ฟฐๆ–นๅผไบ†ใ€‚ๅฎƒ็š„ๅซไน‰ๅพˆ็ฎ€ๅ•๏ผšโ€œๆŠŠ argv ไธญ็š„ไธœ่ฅฟ่งฃๅŒ…๏ผŒ\\\nๅฐ†ๆ‰€ๆœ‰็š„ๅ‚ๆ•ฐไพๆฌก่ต‹ไบˆๅทฆ่พน็š„ๅ˜้‡ๅโ€ใ€‚\n\nๆŽฅไธ‹ๆฅๅฐฑๆ˜ฏๆญฃๅธธ็š„ๆ‰“ๅฐไบ†ใ€‚\n\n็ญ‰ไธ€ไธ‹๏ผโ€œๅŠŸ่ƒฝโ€่ฟ˜ๆœ‰ๅฆๅค–ไธ€ไธชๅๅญ—\n===================================\n\nๅ‰้ขๆˆ‘ไปฌไฝฟ็”จ ``import`` ่ฎฉไฝ ็š„็จ‹ๅบๅฎž็Žฐๆ›ดๅคš็š„ๅŠŸ่ƒฝ๏ผŒไฝ†ๅฎž้™…ไธŠๆฒกไบบๅง ``import`` ็งฐไธบ\\\nโ€œๅŠŸ่ƒฝโ€ใ€‚ๆˆ‘ๅธŒๆœ›ไฝ ๅฏไปฅๅœจๆฒกๆŽฅ่งฆๅˆฐๆญฃๅผๆœฏ่ฏญ็š„ๆ—ถๅ€™ๅฐฑๅผ„ๆ‡‚ๅฎƒ็š„ๅŠŸ่ƒฝใ€‚ๅœจ็ปง็ปญไธ‹ๅŽปไน‹ๅ‰, ไฝ ้œ€่ฆ\\\n็Ÿฅ้“ๅฎƒไปฌ็š„็œŸๆญฃๅ็งฐ๏ผš\\ ``ๆจก็ป„(modules)``\\ใ€‚\n\nไปŽ็Žฐๅœจๅผ€ๅง‹ๆˆ‘ไปฌๅฐ†ๆŠŠ่ฟ™ไบ›ๆˆ‘ไปฌ\\ ``ๅฏผๅ…ฅ(import)``\\่ฟ›ๆฅ็š„ๅŠŸ่ƒฝ็งฐไฝœ\\ ``ๆจก็ป„``\\ใ€‚ไฝ ๅฐ†็œ‹ๅˆฐ็ฑปไผผ่ฟ™ๆ ท\\\n็š„่ฏดๆณ•๏ผšโ€œไฝ ้œ€่ฆๆŠŠ ``sys`` ๆจก็ป„ import ่ฟ›ๆฅใ€‚โ€ไนŸๆœ‰ไบบๅฐ†ๅฎƒไปฌ็งฐไฝœโ€œๅบ“(libraries)โ€๏ผŒไธ่ฟ‡\\\nๆˆ‘ไปฌ่ฟ˜ๆ˜ฏๅซๅฎƒไปฌๆจก็ป„ๅงใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n็”จไธ‹้ข็š„ๆ–นๆณ•่ฟ่กŒไฝ ็š„็จ‹ๅบ๏ผš\n\n.. code-block:: console\n\n python ex13.py first 2nd 3rd\n\nๅฆ‚ๆžœไฝ ๆฏๆฌกไฝฟ็”จไธๅŒ็š„ๅ‚ๆ•ฐ่ฟ่กŒ๏ผŒไฝ ๅฐ†็œ‹ๅˆฐไธ‹้ข็š„็ป“ๆžœ๏ผš\n\n.. 
literalinclude:: ex/ex13.txt\n\nไฝ ๅ…ถๅฎžๅฏไปฅๅฐ†โ€œfirstโ€ใ€โ€œ2ndโ€ใ€โ€œ3rdโ€ๆ›ฟๆขๆˆไปปๆ„ไธ‰ๆ ทไธœ่ฅฟใ€‚ไฝ ๅฏไปฅๅฐ†ๅฎƒไปฌๆขๆˆไปปๆ„\\\nไฝ ๆƒณ่ฆ็š„ไธœ่ฅฟ.\n\n.. code-block:: console\n\n python ex13.py stuff I like\n python ex13.py anything 6 7\n\n\nๅฆ‚ๆžœไฝ ๆฒกๆœ‰่ฟ่กŒๅฏน๏ผŒไฝ ๅฐ†็œ‹ๅˆฐๅฆ‚ไธ‹้”™่ฏฏ๏ผš\n\n.. code-block:: console\n\n python ex13.py first 2nd\n Traceback (most recent call last):\n File \"ex/ex13.py\", line 3, in <module>\n script, first, second, third = argv\n ValueError: need more than 3 values to unpack\n\nๅฝ“ไฝ ่ฟ่กŒ่„šๆœฌๆ—ถๆไพ›็š„ๅ‚ๆ•ฐ็š„ไธชๆ•ฐไธๅฏน็š„ๆ—ถๅ€™๏ผŒไฝ ๅฐฑไผš็œ‹ๅˆฐไธŠ่ฟฐ้”™่ฏฏไฟกๆฏ (่ฟ™ๆฌกๆˆ‘ๅช็”จไบ†\\\n ``first 2nd`` ไธคไธชๅ‚ๆ•ฐ)ใ€‚โ€œneed more than 3 values to unpackโ€่ฟ™ไธช้”™่ฏฏไฟกๆฏๅ‘Š่ฏ‰\\\n ไฝ ๅ‚ๆ•ฐๆ•ฐ้‡ไธ่ถณใ€‚\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ็ป™ไฝ ็š„่„šๆœฌไธ‰ไธชไปฅไธ‹็š„ๅ‚ๆ•ฐใ€‚็œ‹็œ‹ไผšๅพ—ๅˆฐไป€ไนˆ้”™่ฏฏไฟกๆฏใ€‚่ฏ•็€่งฃ้‡Šไธ€ไธ‹ใ€‚\n2. ๅ†ๅ†™ไธคไธช่„šๆœฌ๏ผŒๅ…ถไธญไธ€ไธชๆŽฅๅ—ๆ›ดๅฐ‘็š„ๅ‚ๆ•ฐ๏ผŒๅฆไธ€ไธชๆŽฅๅ—ๆ›ดๅคš็š„ๅ‚ๆ•ฐ๏ผŒๅœจๅ‚ๆ•ฐ่งฃๅŒ…ๆ—ถ็ป™ๅฎƒไปฌ\\\n ๅ–ไธ€ไบ›ๆœ‰ๆ„ไน‰็š„ๅ˜้‡ๅใ€‚\n3. ๅฐ† ``raw_input`` ๅ’Œ ``argv`` ไธ€่ตทไฝฟ็”จ๏ผŒ่ฎฉไฝ ็š„่„šๆœฌไปŽ็”จๆˆทๆ‰‹ไธŠๅพ—ๅˆฐๆ›ดๅคš็š„่พ“ๅ…ฅใ€‚\n4. ่ฎฐไฝโ€œๆจก็ป„(modules)โ€ไธบไฝ ๆไพ›้ขๅค–ๅŠŸ่ƒฝใ€‚ๅคš่ฏปๅ‡ ้ๆŠŠ่ฟ™ไธช่ฏ่ฎฐไฝ๏ผŒๅ› ไธบๆˆ‘ไปฌๅŽ้ข\\\n ่ฟ˜ไผš็”จๅˆฐๅฎƒใ€‚\n\n" }, { "alpha_fraction": 0.6721727848052979, "alphanum_fraction": 0.6942276358604431, "avg_line_length": 40.417293548583984, "blob_id": "d6adb7c42724e72737ba3296d4e260d4bf5a52b2", "content_id": "357eadba997b45101b05b758a1b940326e74ff32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 11018, "license_type": "no_license", "max_line_length": 125, "num_lines": 266, "path": "/ex0.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 0: The Setup\n*********************\n\nThis exercise has no code. 
It is simply the exercise you complete\nto get your computer setup to run Python. You should follow these instructions\nas exactly as possible. For example, Mac OSX computers already have Python 2, so\ndo not install Python 3 (or any Python).\n\n.. warning::\n\n If you do not know how to use PowerShell on Windows or the Terminal on\n OSX or \"bash\" on Linux then you need to go learn that first. I have a\n quick crash course at http://cli.learncodethehardway.org/ which is free\n and will teach you the basics of PowerShell and Terminal quickly. Go\n through that then come back here.\n\n\nMac OSX\n=======\n\nTo complete this exercise, complete the following tasks:\n\n1. Go to http://learnpythonthehardway.org/exercise0.html with your browser, get\n the ``gedit`` text editor, and install it.\n2. Put gedit (your editor) in your Dock so you can reach\n it easily.\n\n a. Run gedit so we can fix some stupid defaults it has.\n b. Open ``Preferences`` from the ``gedit menu`` and select the ``Editor`` tab.\n c. Change ``Tab width:`` to 4.\n d. Select (make sure a check mark is in) ``Insert spaces instead of tabs``.\n e. Turn on \"Automatic indentation\" as well.\n f. Open the ``View`` tab and turn on \"Display line numbers\".\n\n3. Find your \"Terminal\" program. Search for it. You will find it.\n4. Put your Terminal in your Dock as well.\n5. Run your Terminal program. It won't look like much.\n6. In your Terminal program, run ``python``. You run\n things in Terminal by just typing their name and hitting RETURN.\n7. Hit CTRL-D (^D) and get out of python.\n8. You should be back at a prompt similar to what you had before you typed ``python``. If not find out why.\n9. Learn how to make a directory in the Terminal. Search online for help.\n10. Learn how to change into a directory in the Terminal. Again search online.\n11. Use your editor to create a file in this directory. You\n will make the file, \"Save\" or \"Save As...\", and pick this directory.\n12. 
Go back to Terminal using just the keyboard to switch windows. Look it\n up if you can't figure it out.\n13. Back in Terminal, see if you can list the directory to see your \n newly created file. Search online for how to list a directory.\n\n\n.. note::\n\n If you have problems with gedit, which is possible with non-English\n keyboard layouts, then I suggest you try Textwrangler found\n at http://www.barebones.com/products/textwrangler/ instead.\n\n\nOSX: What You Should See\n------------------------\n\nHere's me doing the above on my computer in Terminal. Your computer would be\ndifferent, so see if you can figure out all the differences between what I did\nand what you should do.\n\n.. code-block:: console\n \n Last login: Sat Apr 24 00:56:54 on ttys001\n ~ $ python\n Python 2.5.1 (r251:54863, Feb 6 2009, 19:02:12) \n [GCC 4.0.1 (Apple Inc. build 5465)] on darwin\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> ^D\n ~ $ mkdir mystuff\n ~ $ cd mystuff\n mystuff $ ls\n # ... Use Gedit here to edit test.txt....\n mystuff $ ls\n test.txt\n mystuff $ \n\nWindows\n=======\n\n.. note:: Contributed by zhmark.\n\n1. Go to http://learnpythonthehardway.org/exercise0.html with your browser, get\n the ``gedit`` text editor, and install it. You do not need to be administrator to do this.\n2. Make sure you can get to ``gedit`` easily by putting it on your desktop and/or in\n ``Quick Launch``. Both options are available during setup.\n\n a. Run gedit so we can fix some stupid defaults it has.\n b. Open ``Edit->Preferences`` select the ``Editor`` tab.\n c. Change ``Tab width:`` to 4.\n d. Select (make sure a check mark is in) ``Insert spaces instead of tabs``.\n e. Turn on \"Automatic indentation\" as well.\n f. Open the ``View`` tab turn on \"Display line numbers\".\n\n3. Run \"powershell\" from the start menu. Search for it and you can just hit enter to run it.\n4. 
Make a shortcut to it on your desktop and/or ``Quick Launch`` for your convenience.\n5. Run your Terminal program. It won't look like much.\n6. In your Terminal program, run ``python``. You run things in Terminal by just typing their \n name and hitting RETURN.\n\n a. If you run ``python`` and it's not there (``python is not recognized..``). Install it from http://python.org/download \n b. *Make sure you install Python 2 not Python 3.*\n c. You may be better off with ActiveState Python especially when you miss Administrative rights\n d. If after you install it ``python`` still isn't recognized then in powershell enter this: \n \n ``[Environment]::SetEnvironmentVariable(\"Path\", \"$env:Path;C:\\Python27\", \"User\")``\n \n e. Close powershell and then start it again to make sure python now runs. If it doesn't restart may be required.\n\n7. Hit CTRL-Z (^Z), ``Enter`` and get out of ``python``.\n8. You should be back at a prompt similar to what you had before you typed ``python``. If not find out why.\n9. Learn how to make a directory in the Terminal. Search online for help.\n10. Learn how to change into a directory in the Terminal. Again search online.\n11. Use your editor to create a file in this directory. Make the file, \"Save\" or \"Save As...\", and pick this directory.\n12. Go back to Terminal using just the keyboard to switch windows. Look it\n up if you can't figure it out.\n13. Back in Terminal, see if you can list the directory to see your \n newly created file. Search online for how to list a directory.\n\n\n.. warning::\n\n If you missed it, sometimes you install Python on Windows and it doesn't configure the path correctly.\n Make sure you enter ``[Environment]::SetEnvironmentVariable(\"Path\", \"$env:Path;C:\\Python27\", \"User\")``\n in ``powershell`` to configure it correctly. You also have to either restart powershell or your\n whole computer to get it to really be fixed.\n\n\nWindows: What You Should See\n----------------------------\n\n\n.. 
code-block:: console\n\n > python\n ActivePython 2.6.5.12 (ActiveState Software Inc.) based on\n Python 2.6.5 (r265:79063, Mar 20 2010, 14:22:52) [MSC v.1500 32 bit (Intel)] on win32\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> ^Z\n\n\n > mkdir mystuff\n\n > cd mystuff\n\n ... Here you would use gedit to make test.txt in mystuff ...\n\n >\n <bunch of unimportant errors if you istalled it as non-admin - ignore them - hit Enter>\n > dir\n Volume in drive C is\n Volume Serial Number is 085C-7E02\n\n Directory of C:\\Documents and Settings\\you\\mystuff\n\n 04.05.2010 23:32 <DIR> .\n 04.05.2010 23:32 <DIR> ..\n 04.05.2010 23:32 6 test.txt\n 1 File(s) 6 bytes\n 2 Dir(s) 14 804 623 360 bytes free\n\n > \n\nYou will probably see a very different prompt, Python information, and other stuff but this is\nthe general idea. If your system is different let us know at http://learnpythonthehardway.org\nand we'll fix it.\n\n\nLinux\n=====\n\nLinux is a varied operating system with a bunch of different ways to install software.\nI'm assuming if you are running Linux then you know how to install packages so here are\nyour instructions:\n\n1. Go to http://learnpythonthehardway.org/exercise0.html with your browser, get\n the ``gedit`` text editor, and install it.\n2. Make sure you can get to ``gedit`` easily\n by putting it in your window manager's menu.\n\n a. Run gedit so we can fix some stupid defaults it has.\n b. Open ``Preferences`` select the ``Editor`` tab.\n c. Change ``Tab width:`` to 4.\n d. Select (make sure a check mark is in) ``Insert spaces instead of tabs``.\n e. Turn on \"Automatic indentation\" as well.\n f. Open the ``View`` tab turn on \"Display line numbers\".\n\n3. Find your \"Terminal\" program. It could be called ``GNOME Terminal``, ``Konsole``, or ``xterm``.\n4. Put your Terminal in your Dock as well.\n5. Run your Terminal program. It won't look like much.\n6. In your Terminal program, run ``python``. 
You run\n things in Terminal by just typing their name and hitting RETURN.\n\n a. If you run ``python`` and it's not there, install it. *Make sure you install Python 2 not Python 3.*\n\n7. Hit CTRL-D (^D) and get out of ``python``.\n8. You should be back at a prompt similar to what you had before you typed ``python``. If not find out why.\n9. Learn how to make a directory in the Terminal. Search online for help.\n10. Learn how to change into a directory in the Terminal. Again search online.\n11. Use your editor to create a file in this directory. Typically you\n will make the file, \"Save\" or \"Save As..\", and pick this directory.\n12. Go back to Terminal using just the keyboard to switch windows. Look it\n up if you can't figure it out.\n13. Back in Terminal see if you can list the directory to see your \n newly created file. Search online for how to list a directory.\n\n\nLinux: What You Should See\n--------------------------\n\n\n.. code-block:: console\n\n $ python\n Python 2.6.5 (r265:79063, Apr 1 2010, 05:28:39)\n [GCC 4.4.3 20100316 (prerelease)] on linux2\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>>\n $ mkdir mystuff\n $ cd mystuff\n # ... Use gedit here to edit test.txt ...\n $ ls\n test.txt\n $ \n\nYou will probably see a very different prompt, Python information, and other stuff but this is\nthe general idea.\n\n\nWarnings For Beginners\n======================\n\nYou are done with this exercise. This exercise might be hard for you\ndepending on your familiarity with your computer. If it is difficult,\ntake the time to read and study and get through it, because until you can do\nthese very basic things you will find it difficult to get much programming done.\n\nIf a programmer tells you to use ``vim`` or ``emacs``, tell them, \"No.\" These\neditors are for when you are a better programmer. All you need right now\nis an editor that lets you put text into a file. 
We will use ``gedit`` because\nit is simple and the same on all computers. Professional programmers use\n``gedit`` so it's good enough for you starting out.\n\nA programmer may try to get you to install Python 3 and learn that. You\nshould tell them, \"When all of the python code on your computer is Python 3,\nthen I'll try to learn it.\" That should keep them busy for about 10 years.\n\nA programmer will eventually tell you to use Mac OSX or Linux. If the programmer\nlikes fonts and typography, they'll tell you to get a Mac OSX computer. If they\nlike control and have a huge beard, they'll tell you to install Linux. Again,\nuse whatever computer you have right now that works. All you need is ``gedit``,\na Terminal, and ``python``.\n\nFinally the purpose of this setup is so you can do three things very reliably\nwhile you work on the exercises:\n\n1. *Write* exercises using ``gedit``.\n2. *Run* the exercises you wrote.\n3. *Fix* them when they are broken.\n4. Repeat.\n\nAnything else will only confuse you, so stick to the plan.\n\n" }, { "alpha_fraction": 0.6550424098968506, "alphanum_fraction": 0.670122504234314, "avg_line_length": 22.04347801208496, "blob_id": "20bff8be539f668b34ebc5fef2e21527e4386261", "content_id": "ae746fed69bffddbe95c8fd0e114dcebbab4c28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2271, "license_type": "no_license", "max_line_length": 63, "num_lines": 46, "path": "/cn/ex16.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 16: ่ฏปๅ†™ๆ–‡ไปถ\n**************************************\n\nๅฆ‚ๆžœไฝ ๅšไบ†ไธŠไธ€ไธช็ปƒไน ็š„ๅŠ ๅˆ†ไน ้ข˜๏ผŒไฝ ๅบ”่ฏฅๅทฒ็ปไบ†่งฃไบ†ๅ„็งๆ–‡ไปถ็›ธๅ…ณ็š„ๅ‘ฝไปค๏ผˆๆ–นๆณ•/ๅ‡ฝๆ•ฐ๏ผ‰ใ€‚\\\nไฝ ๅบ”่ฏฅ่ฎฐไฝ็š„ๅ‘ฝไปคๅฆ‚ไธ‹๏ผš\n\n* close -- ๅ…ณ้—ญๆ–‡ไปถใ€‚่ทŸไฝ ็ผ–่พ‘ๅ™จ็š„ ``ๆ–‡ไปถ->ไฟๅญ˜..`` ไธ€ไธชๆ„ๆ€ใ€‚\n* read -- ่ฏปๅ–ๆ–‡ไปถๅ†…ๅฎนใ€‚ไฝ ๅฏไปฅๆŠŠ็ป“ๆžœ่ต‹็ป™ไธ€ไธชๅ˜้‡ใ€‚\n* readline -- 
่ฏปๅ–ๆ–‡ๆœฌๆ–‡ไปถไธญ็š„ไธ€่กŒใ€‚\n* truncate -- ๆธ…็ฉบๆ–‡ไปถ๏ผŒ่ฏทๅฐๅฟƒไฝฟ็”จ่ฏฅๅ‘ฝไปคใ€‚\n* write(stuff) -- ๅฐ†stuffๅ†™ๅ…ฅๆ–‡ไปถใ€‚\n\n่ฟ™ๆ˜ฏไฝ ็Žฐๅœจ่ฏฅ็Ÿฅ้“็š„้‡่ฆๅ‘ฝไปคใ€‚ๆœ‰ไบ›ๅ‘ฝไปค้œ€่ฆๆŽฅๅ—ๅ‚ๆ•ฐ๏ผŒ่ฟ™ๅฏนๆˆ‘ไปฌๅนถไธ้‡่ฆใ€‚ไฝ \\\nๅช่ฆ่ฎฐไฝ ``write`` ็š„็”จๆณ•ๅฐฑๅฏไปฅไบ†ใ€‚ ``write`` ้œ€่ฆๆŽฅๆ”ถไธ€ไธชๅญ—็ฌฆไธฒไฝœไธบๅ‚ๆ•ฐ๏ผŒไปŽ่€Œๅฐ†่ฏฅๅญ—็ฌฆไธฒๅ†™ๅ…ฅๆ–‡ไปถใ€‚\n\n่ฎฉๆˆ‘ไปฌๆฅไฝฟ็”จ่ฟ™ไบ›ๅ‘ฝไปคๅšไธ€ไธช็ฎ€ๅ•็š„ๆ–‡ๆœฌ็ผ–่พ‘ๅ™จๅง: \n\n\n.. literalinclude:: ex/ex16.py\n :linenos:\n\n่ฟ™ไธชๆ–‡ไปถๆ˜ฏๅคŸๅคง็š„๏ผŒๅคงๆฆ‚ๆ˜ฏไฝ ้”ฎๅ…ฅ่ฟ‡็š„ๆœ€ๅคง็š„ๆ–‡ไปถใ€‚ๆ‰€ไปฅๆ…ขๆ…ขๆฅ๏ผŒไป”็ป†ๆฃ€ๆŸฅ๏ผŒ่ฎฉๅฎƒ่ƒฝ\\\n่ฟ่กŒ่ตทๆฅใ€‚ๆœ‰ไธ€ไธชๅฐๆŠ€ๅทงๅฐฑๆ˜ฏไฝ ๅฏไปฅ่ฎฉไฝ ็š„่„šๆœฌไธ€้ƒจๅˆ†ไธ€้ƒจๅˆ†ๅœฐ่ฟ่กŒ่ตทๆฅใ€‚ๅ…ˆๅ†™ 1-8 ่กŒ๏ผŒ\\\n่ฎฉๅฎƒ่ฟ่กŒ่ตทๆฅ๏ผŒๅ†ๅคš่ฟ่กŒ 5 ่กŒ๏ผŒๅ†ๆŽฅ็€ๅคš่ฟ่กŒๅ‡ ่กŒ๏ผŒไปฅๆญค็ฑปๆŽจ๏ผŒ็›ดๅˆฐๆ•ดไธช่„šๆœฌ่ฟ่กŒ่ตทๆฅไธบๆญขใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nไฝ ๅฐ†็œ‹ๅˆฐไธคๆ ทไธœ่ฅฟ๏ผŒไธ€ๆ ทๆ˜ฏไฝ ๆ–ฐ่„šๆœฌ็š„่พ“ๅ‡บ:\n\n.. literalinclude:: ex/ex16.txt\n\nๆŽฅไธ‹ๆฅๆ‰“ๅผ€ไฝ ๆ–ฐๅปบ็š„ๆ–‡ไปถ๏ผˆๆˆ‘็š„ๆ˜ฏ ``test.txt`` ๏ผ‰ๆฃ€ๆŸฅไธ€ไธ‹้‡Œ่พน็š„ๅ†…ๅฎน๏ผŒๆ€Žไนˆๆ ท๏ผŒไธ้”™ๅง๏ผŸ\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅฆ‚ๆžœไฝ ่ง‰ๅพ—่‡ชๅทฑๆฒกๆœ‰ๅผ„ๆ‡‚็š„่ฏ๏ผŒ็”จๆˆ‘ไปฌ็š„่€ๅŠžๆณ•๏ผŒๅœจๆฏไธ€่กŒไน‹ๅ‰ๅŠ ไธŠๆณจ่งฃ๏ผŒไธบ่‡ชๅทฑ็†ๆธ…ๆ€่ทฏใ€‚\\\n ๅฐฑ็ฎ—ไธ่ƒฝ็†ๆธ…ๆ€่ทฏ๏ผŒไฝ ไนŸๅฏไปฅ็Ÿฅ้“่‡ชๅทฑ็ฉถ็ซŸๅ…ทไฝ“ๅ“ช้‡Œๆฒกๅผ„ๆ˜Ž็™ฝใ€‚\n2. ๅ†™ไธ€ไธชๅ’ŒไธŠไธ€ไธช็ปƒไน ็ฑปไผผ็š„่„šๆœฌ๏ผŒไฝฟ็”จ ``read`` ๅ’Œ ``argv`` ่ฏปๅ–ไฝ ๅˆšๆ‰ๆ–ฐๅปบ็š„ๆ–‡ไปถใ€‚\n3. ๆ–‡ไปถไธญ้‡ๅค็š„ๅœฐๆ–นๅคชๅคšไบ†ใ€‚่ฏ•็€็”จไธ€ไธช ``target.write()`` ๅฐ† ``line1``, ``line2``,\n ``line3`` ๆ‰“ๅฐๅ‡บๆฅ๏ผŒไฝ ๅฏไปฅไฝฟ็”จๅญ—็ฌฆไธฒใ€ๆ ผๅผๅŒ–ๅญ—็ฌฆใ€ไปฅๅŠ่ฝฌไน‰ๅญ—็ฌฆใ€‚\n4. 
ๆ‰พๅ‡บไธบไป€ไนˆๆˆ‘ไปฌ้œ€่ฆ็ป™ ``open`` ๅคš่ต‹ไบˆไธ€ไธช ``'w'`` ๅ‚ๆ•ฐใ€‚ๆ็คบ๏ผš\n ``open`` ๅฏนไบŽๆ–‡ไปถ็š„ๅ†™ๅ…ฅๆ“ไฝœๆ€ๅบฆๆ˜ฏๅฎ‰ๅ…จ็ฌฌไธ€๏ผŒๆ‰€ไปฅไฝ ๅชๆœ‰็‰นๅˆซๆŒ‡ๅฎšไปฅๅŽ๏ผŒๅฎƒๆ‰ไผš่ฟ›่กŒๅ†™ๅ…ฅๆ“ไฝœใ€‚\n\n" }, { "alpha_fraction": 0.5750962495803833, "alphanum_fraction": 0.5879332423210144, "avg_line_length": 18.450000762939453, "blob_id": "b2bd94a2ae5730c2a420075de53ef1e8a306aab2", "content_id": "fa7024895dc9251e386c4e701dc490c4ccc081e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1389, "license_type": "no_license", "max_line_length": 68, "num_lines": 40, "path": "/cn/ex12.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 12: ๆ็คบๅˆซไบบ\n*****************************\n\nๅฝ“ไฝ ้”ฎๅ…ฅ ``raw_input()`` ็š„ๆ—ถๅ€™๏ผŒไฝ ้œ€่ฆ้”ฎๅ…ฅ ``(`` ๅ’Œ ``)`` ไนŸๅฐฑๆ˜ฏโ€œๆ‹ฌๅท(parenthesis)โ€ใ€‚\\ \n่ฟ™ๅ’Œไฝ ๆ ผๅผๅŒ–่พ“ๅ‡บไธคไธชไปฅไธŠๅ˜้‡ๆ—ถ็š„ๆƒ…ๅ†ตๆœ‰็‚น็ฑปไผผ๏ผŒๆฏ”ๅฆ‚่ฏด ``\"%s %s\" % (x, y)`` ้‡Œ่พนๅฐฑ\\\nๆœ‰ๆ‹ฌๅทใ€‚ๅฏนไบŽ ``raw_input`` ่€Œ่จ€๏ผŒไฝ ่ฟ˜ๅฏไปฅ่ฎฉๅฎƒๆ˜พ็คบๅ‡บไธ€ไธชๆ็คบ๏ผŒไปŽ่€Œๅ‘Š่ฏ‰ๅˆซไบบๅบ”่ฏฅ่พ“ๅ…ฅ\\\nไป€ไนˆไธœ่ฅฟใ€‚ไฝ ๅฏไปฅๅœจ ``()`` ไน‹้—ดๆ”พๅ…ฅไธ€ไธชไฝ ๆƒณ่ฆไฝœไธบๆ็คบ็š„ๅญ—็ฌฆไธฒ๏ผŒๅฆ‚ไธ‹ๆ‰€็คบ๏ผš\n\n\n.. code-block:: python\n\n y = raw_input(\"Name? \")\n\n่ฟ™ๅฅ่ฏไผš็”จ โ€œName?โ€ ๆ็คบ็”จๆˆท๏ผŒ็„ถๅŽๅฐ†็”จๆˆท่พ“ๅ…ฅ็š„็ป“ๆžœ่ต‹ๅ€ผ็ป™ๅ˜้‡ ``y``\\ใ€‚่ฟ™ๅฐฑๆ˜ฏๆˆ‘ไปฌ\\\nๆ้—ฎ็”จๆˆทๅนถไธ”ๅพ—ๅˆฐ็ญ”ๆกˆ็š„ๆ–นๅผใ€‚\n\n\nไนŸๅฐฑๆ˜ฏ่ฏด๏ผŒๆˆ‘ไปฌ็š„ไธŠไธ€ไธช็ปƒไน ๅฏไปฅไฝฟ็”จ ``raw_input`` ้‡ๅ†™ไธ€ๆฌกใ€‚ๆ‰€ๆœ‰็š„ๆ็คบ้ƒฝๅฏไปฅ้€š่ฟ‡\n ``raw_input`` ๅฎž็Žฐใ€‚\n \n\n.. literalinclude:: ex/ex12.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n\n.. literalinclude:: ex/ex12.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅœจๅ‘ฝไปค่กŒ็•Œ้ขไธ‹่ฟ่กŒไฝ ็š„็จ‹ๅบ๏ผŒ็„ถๅŽๅœจๅ‘ฝไปค่กŒ่พ“ๅ…ฅ ``pydoc raw_input`` ็œ‹ๅฎƒ่ฏดไบ†ไบ›ไป€ไนˆใ€‚\n2. ่พ“ๅ…ฅ ``q`` ้€€ๅ‡บ pydocใ€‚\n3. ไธŠ็ฝ‘ๆ‰พไธ€ไธ‹ ``pydoc`` ๅ‘ฝไปคๆ˜ฏ็”จๆฅๅšไป€ไนˆ็š„ใ€‚\n4. 
ไฝฟ็”จ pydoc ๅ†็œ‹ไธ€ไธ‹ ``open``, ``file``, ``os``, ๅ’Œ ``sys`` ็š„ๅซไน‰ใ€‚็œ‹ไธๆ‡‚ๆฒก\\\n ๅ…ณ็ณป๏ผŒๅช่ฆ้€š่ฏปไธ€ไธ‹๏ผŒ่ฎฐไธ‹ไฝ ่ง‰ๅพ—ๆœ‰ๆ„ๆ€็š„็‚นๅฐฑ่กŒไบ†ใ€‚\n\n" }, { "alpha_fraction": 0.8111110925674438, "alphanum_fraction": 0.8111110925674438, "avg_line_length": 21.5, "blob_id": "be38f639c45a1d64d3a375c2c657157ca33afb28", "content_id": "1d63cc3f8260ff6cbf442882e56f2cc265c0d5e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/README.md", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "# lpthw-cn\nLearn Python the Hard Way Chinese Translation\n\nๆœฌไนฆๅทฒ็”ฑไบบๆฐ‘้‚ฎ็”ตๅ‡บ็‰ˆ็คพๅ‡บ็‰ˆใ€‚ๆญคๅค„ไธบๆ—ง repo๏ผŒๅ†…ๅฎนไธๅ†ๆ›ดๆ–ฐใ€‚\n" }, { "alpha_fraction": 0.6438485980033875, "alphanum_fraction": 0.6809148192405701, "avg_line_length": 30.226600646972656, "blob_id": "e5023aceaf415a1951416d2a69e394478199c57c", "content_id": "09e491f59fc6d301e4bab4e177f4257fdf63066d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10942, "license_type": "no_license", "max_line_length": 120, "num_lines": 203, "path": "/cn/ex0.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 0: ๅ‡†ๅค‡ๅทฅไฝœ\n*********************\n\n่ฟ™้“ไน ้ข˜ๅนถๆฒกๆœ‰ไปฃ็ ๅ†…ๅฎน๏ผŒๅฎƒ็š„ไธป่ฆ็›ฎ็š„ๆ˜ฏ่ฎฉไฝ ๅœจ่ฎก็ฎ—ๆœบไธŠๅฎ‰่ฃ…ๅฅฝ Pythonใ€‚ไฝ ๅบ”่ฏฅๅฐฝ้‡\\\n็…ง็€่ฏดๆ˜Ž่ฟ›่กŒๆ“ไฝœ๏ผŒไพ‹ๅฆ‚ Mac OSX ้ป˜่ฎคๅทฒ็ปๅฎ‰่ฃ…ไบ† Python 2๏ผŒๆ‰€ไปฅๅฐฑไธ่ฆๅœจไธŠ้ขๅฎ‰่ฃ…\\\nPython 3 ๆˆ–่€…ๅˆซ็š„ Python ็‰ˆๆœฌไบ†ใ€‚\n\n\nMac OSX\n=======\n\nไฝ ้œ€่ฆๅšไธ‹ๅˆ—ไปปๅŠกๆฅๅฎŒๆˆ่ฟ™ไธช็ปƒไน ๏ผš\n\n1. ็”จๆต่งˆๅ™จๆ‰“ๅผ€ http://learnpythonthehardway.org/wiki/ExerciseZero ไธ‹่ฝฝๅนถๅฎ‰่ฃ… ``gedit`` ๆ–‡ๆœฌ็ผ–่พ‘ๅ™จใ€‚\n2. ๆŠŠ gedit (ไนŸๅฐฑๆ˜ฏไฝ ็š„็ผ–่พ‘ๅ™จ) ๆ”พๅˆฐ Dock ไธญ๏ผŒไปฅๆ–นไพฟๆ—ฅๅŽไฝฟ็”จใ€‚\n a. 
่ฟ่กŒ gedit๏ผŒๆˆ‘ไปฌ่ฆๅ…ˆๆ”นๆމไธ€ไบ›ๆ„š่ ข็š„้ป˜่ฎค่ฎพๅฎšใ€‚\n b. ไปŽ ``gedit menu`` ไธญๆ‰“ๅผ€ ``Preferences``\\๏ผŒ้€‰ๆ‹ฉ ``Editor`` ้กต้ขใ€‚\n c. ๅฐ† ``Tab width:`` ๆ”นไธบ 4ใ€‚\n d. ้€‰ๆ‹ฉ (็กฎ่ฎคๆœ‰ๅ‹พ้€‰ๅˆฐ่ฏฅ้€‰้กน) ``Insert spaces instead of tabs``\\ใ€‚\n e. ็„ถๅŽๆ‰“ๅผ€ โ€œAutomatic indentationโ€ ้€‰้กนใ€‚\n f. ่ฝฌๅˆฐ ``View`` ้กต้ข๏ผŒๆ‰“ๅผ€ โ€œDisplay line numbersโ€ ้€‰้กนใ€‚\n3. ๆ‰พๅˆฐ็ณป็ปŸไธญ็š„ โ€œๅ‘ฝไปค่กŒ็ปˆ็ซฏ(Terminal)โ€ ็จ‹ๅบใ€‚ๅˆฐๅค„ๆ‰พๆ‰พ๏ผŒไฝ ไผšๆ‰พๅˆฐ็š„ใ€‚\n4. ๆŠŠ Terminal ไนŸๆ”พๅˆฐ Dock ้‡Œ้ขใ€‚\n5. ่ฟ่กŒ Terminal ็จ‹ๅบ๏ผŒ่ฟ™ไธช็จ‹ๅบ็œ‹ไธŠๅŽปไธๆ€Žไนˆๅœฐใ€‚\n6. ๅœจ Terminal ็จ‹ๅบ้‡Œ่พน่ฟ่กŒ ``python``\\ใ€‚่ฟ่กŒ็š„ๆ–นๆณ•ๆ˜ฏ่พ“ๅ…ฅ็จ‹ๅบ็š„ๅๅญ—ๅ†ๆ•ฒไธ€ไธ‹ๅ›ž่ฝฆใ€‚\n7. ๆ•ฒๅ‡ป CTRL-D (^D) ้€€ๅ‡บ pythonใ€‚\n8. ่ฟ™ๆ ทไฝ ๅฐฑๅบ”่ฏฅ้€€ๅ›žๅˆฐๆ•ฒ ``python`` ๅ‰็š„ๆ็คบ็•Œ้ขไบ†ใ€‚ๅฆ‚ๆžœๆฒกๆœ‰็š„่ฏ่‡ชๅทฑ็ ”็ฉถไธ€ไธ‹ไธบไป€ไนˆใ€‚\n9. ๅญฆ็€ไฝฟ็”จ Terminal ๅˆ›ๅปบไธ€ไธช็›ฎๅฝ•๏ผŒไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขๆ€Žๆ ทๅšใ€‚\n10. ๅญฆ็€ไฝฟ็”จ Terminal ่ฟ›ๅ…ฅไธ€ไธช็›ฎๅฝ•๏ผŒๅŒๆ ทไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n11. ไฝฟ็”จไฝ ็š„็ผ–่พ‘ๅ™จๅœจไฝ ่ฟ›ๅ…ฅ็š„็›ฎๅฝ•ไธ‹ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถใ€‚ไฝ ๅฐ†ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถใ€‚ไฝฟ็”จ โ€œSaveโ€ ๆˆ–่€… โ€œSave As...โ€ ้€‰้กน๏ผŒ็„ถๅŽ้€‰ๆ‹ฉ่ฟ™ไธช็›ฎๅฝ•ใ€‚\n12. ไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆขๅ›žๅˆฐ Terminal ็ช—ๅฃ๏ผŒๅฆ‚ๆžœไธ็Ÿฅ้“ๆ€Žๆ ทไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆข๏ผŒไฝ ไธ€ๆ ทๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n13. ๅ›žๅˆฐ Terminal๏ผŒ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝไฝฟ็”จๅ‘ฝไปค็œ‹ๅˆฐไฝ ๆ–ฐๅปบ็š„ๆ–‡ไปถ๏ผŒไธŠ็ฝ‘ๆœ็ดขๅฆ‚ไฝ•ๅฐ†ๆ–‡ไปถๅคนไธญ็š„ๅ†…ๅฎนๅˆ—ๅ‡บๆฅใ€‚\n\n\n\nOSX: ไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n------------------------\n\nไปฅไธ‹ๆ˜ฏๆˆ‘ๅœจ่‡ชๅทฑ็”ต่„‘็š„ Terminal ไธญๆ‰ง่กŒไธŠ่ฟฐ็ปƒไน ๆ—ถ็œ‹ๅˆฐ็š„ๅ†…ๅฎนใ€‚ๅ’Œไฝ ๅš็š„็ป“ๆžœไผšๆœ‰ไธ€ไบ›ไธๅŒ๏ผŒๆ‰€ไปฅ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝๆ‰พๅ‡บไธค่€…ไธๅŒ็‚นๆฅใ€‚\n\n.. code-block:: console\n \n Last login: Sat Apr 24 00:56:54 on ttys001\n ~ $ python\n Python 2.5.1 (r251:54863, Feb 6 2009, 19:02:12) \n [GCC 4.0.1 (Apple Inc. 
build 5465)] on darwin\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> ^D\n ~ $ mkdir mystuff\n ~ $ cd mystuff\n mystuff $ ls\n # ... ไฝฟ็”จgedit็ผ–่พ‘text.txt ...\n mystuff $ ls\n test.txt\n mystuff $ \n\nWindows\n=======\n\n.. note:: Contributed by zhmark.\n\n1. ็”จๆต่งˆๅ™จๆ‰“ๅผ€ http://learnpythonthehardway.org/wiki/ExerciseZero ไธ‹่ฝฝๅนถๅฎ‰่ฃ… ``gedit`` ๆ–‡ๆœฌ็ผ–่พ‘ๅ™จใ€‚่ฟ™ไธชๆ“ไฝœๆ— ้œ€็ฎก็†ๅ‘˜ๆƒ้™ใ€‚\n2. ๆŠŠ ``gedit`` ๆ”พๅˆฐๆกŒ้ขๆˆ–่€…ๅฟซ้€ŸๅฏๅŠจๆ ๏ผŒ่ฟ™ๆ ทไฝ ๅฐฑๅฏไปฅๆ–นไพฟๅœฐ่ฎฟ้—ฎๅˆฐ่ฏฅ็จ‹ๅบไบ†ใ€‚่ฟ™ไธคๆกๅœจๅฎ‰่ฃ…้€‰้กนไธญๅฏไปฅ็œ‹ๅˆฐใ€‚\n a. ่ฟ่กŒ gedit๏ผŒๆˆ‘ไปฌ่ฆๅ…ˆๆ”นๆމไธ€ไบ›ๆ„š่ ข็š„้ป˜่ฎค่ฎพๅฎšใ€‚\n b. ไปŽ ``gedit menu`` ไธญๆ‰“ๅผ€ ``Preferences``\\๏ผŒ้€‰ๆ‹ฉ ``Editor`` ้กต้ขใ€‚\n c. ๅฐ† ``Tab width:`` ๆ”นไธบ 4ใ€‚\n d. ้€‰ๆ‹ฉ (็กฎ่ฎคๆœ‰ๅ‹พ้€‰ๅˆฐ่ฏฅ้€‰้กน) ``Insert spaces instead of tabs``\\ใ€‚\n e. ็„ถๅŽๆ‰“ๅผ€ โ€œAutomatic indentationโ€ ้€‰้กนใ€‚\n f. ่ฝฌๅˆฐ ``View`` ้กต้ข๏ผŒๆ‰“ๅผ€ โ€œDisplay line numbersโ€ ้€‰้กนใ€‚\n3. ๆ‰พๅˆฐ โ€œTerminalโ€ ็จ‹ๅบใ€‚ๅฎƒ็š„ๅๅญ—ๆ˜ฏ ``ๅ‘ฝไปคๆ็คบ็ฌฆ``\\๏ผŒๆˆ–่€…ไฝ ไนŸๅฏไปฅ็›ดๆŽฅ่ฟ่กŒ ``cmd``\\ใ€‚ \n4. ไธบๅฎƒๅˆ›ๅปบไธ€ไธชๅฟซๆทๆ–นๅผ๏ผŒๆ”พๅˆฐๆกŒ้ขๆˆ–่€…ๅฟซ้€ŸๅฏๅŠจๆ ไธญไปฅๆ–นไพฟไฝฟ็”จใ€‚\n5. ่ฟ่กŒ Terminal ็จ‹ๅบ๏ผŒ่ฟ™ไธช็จ‹ๅบ็œ‹ไธŠๅŽปไธๆ€Žไนˆๅœฐใ€‚\n6. ๅœจ Terminal ็จ‹ๅบ้‡Œ่พน่ฟ่กŒ ``python``\\ใ€‚่ฟ่กŒ็š„ๆ–นๆณ•ๆ˜ฏ่พ“ๅ…ฅ็จ‹ๅบ็š„ๅๅญ—ๅ†ๆ•ฒไธ€ไธ‹ๅ›ž่ฝฆใ€‚\n a. ๅฆ‚ๆžœไฝ ่ฟ่กŒ ``python`` ๅ‘็Žฐๅฎƒไธๅญ˜ๅœจ(``็ณป็ปŸๆ‰พไธๅˆฐpythonไบ‘ไบ‘``)ใ€‚ไฝ ้œ€่ฆ่ฎฟ้—ฎ http://python.org/download ๅนถไธ”ๅฎ‰่ฃ… Pythonใ€‚\n b. ็กฎ่ฎคไฝ ๅฎ‰่ฃ…็š„ๆ˜ฏ Python 2 ่€Œไธๆ˜ฏ Python 3ใ€‚\n c. ไฝ ไนŸๅฏไปฅ่ฏ•่ฏ• ActiveState Python๏ผŒๅฐคๅ…ถๆ˜ฏไฝ ๆฒกๆœ‰็ฎก็†ๅ‘˜ๆƒ้™็š„ๆ—ถๅ€™ใ€‚\n7. ๆ•ฒๅ‡ป CTRL-Z (^Z)๏ผŒๅ†ๆ•ฒๅ›ž่ฝฆไปฅ้€€ๅ‡บ ``python``\\ใ€‚\n8. ่ฟ™ๆ ทไฝ ๅฐฑๅบ”่ฏฅ้€€ๅ›žๅˆฐๆ•ฒ ``python`` ๅ‰็š„ๆ็คบ็•Œ้ขไบ†ใ€‚ๅฆ‚ๆžœๆฒกๆœ‰็š„่ฏ่‡ชๅทฑ็ ”็ฉถไธ€ไธ‹ไธบไป€ไนˆใ€‚\n9. ๅญฆ็€ไฝฟ็”จ Terminal ๅˆ›ๅปบไธ€ไธช็›ฎๅฝ•๏ผŒไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขๆ€Žๆ ทๅšใ€‚\n10. 
ๅญฆ็€ไฝฟ็”จ Terminal ่ฟ›ๅ…ฅไธ€ไธช็›ฎๅฝ•ใ€‚ๅŒๆ ทไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n11. ไฝฟ็”จไฝ ็š„็ผ–่พ‘ๅ™จๅœจไฝ ่ฟ›ๅ…ฅ็š„็›ฎๅฝ•ไธ‹ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถใ€‚ไฝ ๅฐ†ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถ๏ผŒไฝฟ็”จ \"Save\" ๆˆ–่€… \"Save As...\" ้€‰้กน๏ผŒ็„ถๅŽ้€‰ๆ‹ฉ่ฟ™ไธช็›ฎๅฝ•ใ€‚\n12. ไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆขๅ›žๅˆฐ Terminal ็ช—ๅฃ๏ผŒๅฆ‚ๆžœไธ็Ÿฅ้“ๆ€Žๆ ทไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆข๏ผŒไฝ ไธ€ๆ ทๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n13. ๅ›žๅˆฐ Terminal๏ผŒ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝไฝฟ็”จๅ‘ฝไปค็œ‹ๅˆฐไฝ ๆ–ฐๅปบ็š„ๆ–‡ไปถ๏ผŒไธŠ็ฝ‘ๆœ็ดขๅฆ‚ไฝ•ๅฐ†ๆ–‡ไปถๅคนไธญ็š„ๅ†…ๅฎนๅˆ—ๅ‡บๆฅใ€‚\n\n\n.. warning::\n\n ๅฏนไบŽ Python ๆฅ่ฏด Windows ๆ˜ฏไธชๅคง้—ฎ้ข˜. ๆœ‰ๆ—ถไฝ ๅœจไธ€ๅฐ็”ต่„‘ไธŠ่ฃ…ๅพ—ๅฅฝๅฅฝ็š„, ไฝ†ๅœจๅฆๅค–ไธ€ๅฐ็”ต่„‘ไธŠๅดไผšๆœ‰้—ฎ้ข˜. ๅฆ‚ๆžœ็ขฐๅˆฐ้—ฎ้ข˜็š„่ฏ, ไฝ ๅฏไปฅๅ‚่€ƒ: http://docs.python.org/faq/windows.html\n\nWindows: ไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n--------------------------\n\n\n.. code-block:: bat\n\n C:\\Documents and Settings\\you>python\n ActivePython 2.6.5.12 (ActiveState Software Inc.) based on\n Python 2.6.5 (r265:79063, Mar 20 2010, 14:22:52) [MSC v.1500 32 bit (Intel)] on win32\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> ^Z\n\n\n C:\\Documents and Settings\\you>mkdir mystuff\n\n C:\\Documents and Settings\\you>cd mystuff\n\n ... 
ไฝฟ็”จgeditๅˆ›ๅปบๅ’Œ็ผ–่พ‘text.txt ...\n\n C:\\Documents and Settings\\you\\mystuff>\n < ๅฆ‚ๆžœไฝ ๆฒกๆœ‰ไฝฟ็”จ็ฎก็†ๅ‘˜ๆƒ้™ๅฎ‰่ฃ…็š„่ฏ, ไฝ ๅฏ่ƒฝไผš็œ‹ๅˆฐไธ€ๅคงๅ †ๆ— ๅ…ณ็ดง่ฆ็š„้”™่ฏฏไฟกๆฏ -- ไฝ ๅช่ฆๅฟฝ็•ฅๅฎƒไปฌๆ—ขๅฏ -- ๆ•ฒๅ›ž่ฝฆ้”ฎ็ปง็ปญ >\n C:\\Documents and Settings\\you\\mystuff>dir\n Volume in drive C is\n Volume Serial Number is 085C-7E02\n\n Directory of C:\\Documents and Settings\\you\\mystuff\n\n 04.05.2010 23:32 <DIR> .\n 04.05.2010 23:32 <DIR> ..\n 04.05.2010 23:32 6 test.txt\n 1 File(s) 6 bytes\n 2 Dir(s) 14 804 623 360 bytes free\n\n C:\\Documents and Settings\\you\\mystuff> \n\nไฝ ็œ‹ๅˆฐ็š„ๅ‘ฝไปค่กŒไฟกๆฏ๏ผŒPython ไฟกๆฏ๏ผŒไปฅๅŠๅ…ถๅฎƒไธ€ไบ›ไธœ่ฅฟๅฏ่ƒฝไผš้žๅธธไธไธ€ๆ ท๏ผŒไธ่ฟ‡ๅบ”่ฏฅๅคง่‡ดไธๅทฎใ€‚ไฝ ๅฏไปฅ้€š่ฟ‡ http://learnpythonthehardway.org ๆŠŠไฝ ๆ‰พๅˆฐ็š„้”™ๅค„ๅ‘Š่ฏ‰ๆˆ‘ไปฌ๏ผŒๆˆ‘ไปฌไผšไฟฎๆญฃ่ฟ‡ๆฅใ€‚\n\n\nLinux\n=====\n\nLinux ็ณป็ปŸๅฏ่ฐ“ไบ”่Šฑๅ…ซ้—จ๏ผŒๅฎ‰่ฃ…่ฝฏไปถ็š„ๆ–นๅผไนŸๅ„ๆœ‰ไธๅŒใ€‚ๆˆ‘ไปฌๅ‡่ฎพไฝœไธบ Linux ็”จๆˆท็š„ไฝ ๅทฒ็ป็Ÿฅ้“ๅฆ‚ไฝ•ๅฎ‰่ฃ…่ฝฏไปถๅŒ…ไบ†๏ผŒไปฅไธ‹ๆ˜ฏ็ป™ไฝ ็š„ๆ“ไฝœ่ฏดๆ˜Ž๏ผš\n\n1. 1. ็”จๆต่งˆๅ™จๆ‰“ๅผ€ http://learnpythonthehardway.org/wiki/ExerciseZero ไธ‹่ฝฝๅนถๅฎ‰่ฃ… ``gedit`` ๆ–‡ๆœฌ็ผ–่พ‘ๅ™จใ€‚\n2. ๆŠŠ gedit (ไนŸๅฐฑๆ˜ฏไฝ ็š„็ผ–่พ‘ๅ™จ) ๆ”พๅˆฐ็ช—ๅฃ็ฎก็†ๅ™จๆ˜พ่ง็š„ไฝ็ฝฎ๏ผŒไปฅๆ–นไพฟๆ—ฅๅŽไฝฟ็”จใ€‚\n a. ่ฟ่กŒ gedit๏ผŒๆˆ‘ไปฌ่ฆๅ…ˆๆ”นๆމไธ€ไบ›ๆ„š่ ข็š„้ป˜่ฎค่ฎพๅฎšใ€‚\n b. ไปŽ ``gedit menu`` ไธญๆ‰“ๅผ€ ``Preferences``\\๏ผŒ้€‰ๆ‹ฉ ``Editor`` ้กต้ขใ€‚\n c. ๅฐ† ``Tab width:`` ๆ”นไธบ 4ใ€‚\n d. ้€‰ๆ‹ฉ (็กฎ่ฎคๆœ‰ๅ‹พ้€‰ๅˆฐ่ฏฅ้€‰้กน) ``Insert spaces instead of tabs``\\ใ€‚\n e. ็„ถๅŽๆ‰“ๅผ€ โ€œAutomatic indentationโ€ ้€‰้กนใ€‚\n f. ่ฝฌๅˆฐ ``View`` ้กต้ข๏ผŒๆ‰“ๅผ€ \"Display line numbers\" ้€‰้กนใ€‚\n3. ๆ‰พๅˆฐ \"Terminal\" ็จ‹ๅบใ€‚ๅฎƒ็š„ๅๅญ—ๅฏ่ƒฝๆ˜ฏ ``GNOME Terminal``\\ใ€\\ ``Konsole``\\ใ€ๆˆ–่€… ``xterm``\\ใ€‚\n4. ๆŠŠ Terminal ไนŸๆ”พๅˆฐ Dock ้‡Œ้ขใ€‚\n5. ่ฟ่กŒ Terminal ็จ‹ๅบ๏ผŒ่ฟ™ไธช็จ‹ๅบ็œ‹ไธŠๅŽปไธๆ€Žไนˆๅœฐใ€‚\n6. 
ๅœจ Terminal ็จ‹ๅบ้‡Œ่พน่ฟ่กŒ ``python``\\ใ€‚่ฟ่กŒ็š„ๆ–นๆณ•ๆ˜ฏ่พ“ๅ…ฅ็จ‹ๅบ็š„ๅๅญ—ๅ†ๆ•ฒไธ€ไธ‹ๅ›ž่ฝฆใ€‚\n a. ๅฆ‚ๆžœไฝ ่ฟ่กŒ ``python`` ๅ‘็Žฐๅฎƒไธๅญ˜ๅœจ็š„่ฏ๏ผŒไฝ ้œ€่ฆๅฎ‰่ฃ…ๅฎƒ๏ผŒ่€Œไธ”่ฆ็กฎ่ฎคไฝ ๅฎ‰่ฃ…็š„ๆ˜ฏ Python 2 ่€Œ้ž Python 3ใ€‚\n7. ๆ•ฒๅ‡ป CTRL-D (^D) ไปฅ้€€ๅ‡บ ``python``\\ใ€‚\n8. ่ฟ™ๆ ทไฝ ๅฐฑๅบ”่ฏฅ้€€ๅ›žๅˆฐๆ•ฒ ``python`` ๅ‰็š„ๆ็คบ็•Œ้ขไบ†ใ€‚ๅฆ‚ๆžœๆฒกๆœ‰็š„่ฏ่‡ชๅทฑ็ ”็ฉถไธ€ไธ‹ไธบไป€ไนˆใ€‚\n9. ๅญฆ็€ไฝฟ็”จ Terminal ๅˆ›ๅปบไธ€ไธช็›ฎๅฝ•ใ€‚ไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขๆ€Žๆ ทๅšใ€‚\n10. ๅญฆ็€ไฝฟ็”จ Terminal ่ฟ›ๅ…ฅไธ€ไธช็›ฎๅฝ•ใ€‚ๅŒๆ ทไฝ ๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n11. ไฝฟ็”จไฝ ็š„็ผ–่พ‘ๅ™จๅœจไฝ ่ฟ›ๅ…ฅ็š„็›ฎๅฝ•ไธ‹ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถใ€‚ไฝ ๅฐ†ๅปบ็ซ‹ไธ€ไธชๆ–‡ไปถ๏ผŒไฝฟ็”จ โ€œSaveโ€ ๆˆ–่€… โ€œSave As...โ€ ้€‰้กน๏ผŒ็„ถๅŽ้€‰ๆ‹ฉ่ฟ™ไธช็›ฎๅฝ•ใ€‚\n12. ไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆขๅ›žๅˆฐ Terminal ็ช—ๅฃ๏ผŒๅฆ‚ๆžœไธ็Ÿฅ้“ๆ€Žๆ ทไฝฟ็”จ้”ฎ็›˜ๅˆ‡ๆข๏ผŒไฝ ไธ€ๆ ทๅฏไปฅไธŠ็ฝ‘ๆœ็ดขใ€‚\n13. ๅ›žๅˆฐ Terminal๏ผŒ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝไฝฟ็”จๅ‘ฝไปค็œ‹ๅˆฐไฝ ๆ–ฐๅปบ็š„ๆ–‡ไปถ๏ผŒไธŠ็ฝ‘ๆœ็ดขๅฆ‚ไฝ•ๅฐ†ๆ–‡ไปถๅคนไธญ็š„ๅ†…ๅฎนๅˆ—ๅ‡บๆฅใ€‚\n\n\nLinux: ไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n--------------------------\n\n\n.. code-block:: console\n\n [~]$ python\n Python 2.6.5 (r265:79063, Apr 1 2010, 05:28:39)\n [GCC 4.4.3 20100316 (prerelease)] on linux2\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>>\n [~]$ mkdir mystuff\n [~]$ cd mystuff\n # ... 
ไฝฟ็”จgedit็ผ–่พ‘text.txt ...\n [mystuff]$ ls\n test.txt\n [mystuff]$ \n\nไฝ ็œ‹ๅˆฐ็š„ๅ‘ฝไปค่กŒไฟกๆฏ๏ผŒPython ไฟกๆฏ๏ผŒไปฅๅŠๅ…ถๅฎƒไธ€ไบ›ไธœ่ฅฟๅฏ่ƒฝไผš้žๅธธไธไธ€ๆ ทใ€‚ไธ่ฟ‡ๅบ”่ฏฅๅคง่‡ดไธๅทฎๅฐฑๆ˜ฏไบ†ใ€‚\n\n\n็ป™ๆ–ฐๆ‰‹็š„ๅ‘Š่ฏซ\n======================\n\nไฝ ๅทฒ็ปๅฎŒๆˆไบ†่ฟ™่Š‚็ปƒไน ๏ผŒๅ–ๅ†ณไบŽไฝ ๅฏน่ฎก็ฎ—ๆœบ็š„็†Ÿๆ‚‰็จ‹ๅบฆ๏ผŒ่ฟ™ไธช็ปƒไน ๅฏนไฝ ่€Œ่จ€ๅฏ่ƒฝไผšๆœ‰ไบ›้šพใ€‚\\\nๅฆ‚ๆžœไฝ ่ง‰ๅพ—ๆœ‰้šพๅบฆ็š„่ฏ๏ผŒไฝ ่ฆ่‡ชๅทฑๅ…‹ๆœๅ›ฐ้šพ๏ผŒๅคš่Šฑ็‚นๆ—ถ้—ดๅญฆไน ไธ€ไธ‹ใ€‚ๅ› ไธบๅฆ‚ๆžœไฝ ไธไผš่ฟ™ไบ›\\\nๅŸบ็ก€ๆ“ไฝœ็š„่ฏ๏ผŒ็ผ–็จ‹ๅฏนไฝ ๆฅ่ฏดๅฐ†ไผšๆ›ด้šพๅญฆไน ใ€‚\n\nๅฆ‚ๆžœๆœ‰็จ‹ๅบๅ‘˜ๅ‘Š่ฏ‰ไฝ ่ฎฉไฝ ไฝฟ็”จ ``vim`` ๆˆ–่€… ``emacs``\\๏ผŒ้‚ฃไฝ ๅบ”่ฏฅๆ‹’็ปไป–ไปฌใ€‚ๅฝ“ไฝ ๆˆไธบ\\\nไธ€ไธชๆ›ดๅฅฝ็š„็จ‹ๅบๅ‘˜็š„ๆ—ถๅ€™๏ผŒ่ฟ™ไบ›็ผ–่พ‘ๅ™จๆ‰ไผš้€‚ๅˆไฝ ไฝฟ็”จใ€‚ไฝ ็Žฐๅœจ้œ€่ฆ็š„ๅชๆ˜ฏไธ€ไธชๅฏไปฅ็ผ–่พ‘\\\nๆ–‡ๅญ—็š„็ผ–่พ‘ๅ™จใ€‚ๆˆ‘ไปฌไฝฟ็”จ ``gedit`` ๆ˜ฏๅ› ไธบๅฎƒๅพˆ็ฎ€ๅ•๏ผŒ่€Œไธ”ๅœจไธๅŒ็š„็ณป็ปŸไธŠ้ขไฝฟ็”จ่ตทๆฅ\\\nๆ˜ฏไธ€ๆ ท็š„ใ€‚ๅฐฑ่ฟžไธ“ไธš็จ‹ๅบๅ‘˜ไนŸไผšไฝฟ็”จ ``gedit``\\๏ผŒๆ‰€ไปฅๅฏนไบŽๅˆๅญฆ่€Œ่จ€ๅฎƒๅทฒ็ป่ถณๅคŸไบ†ใ€‚\n\nไนŸ่ฎธๆœ‰็จ‹ๅบๅ‘˜ไผšๅ‘Š่ฏ‰ไฝ ่ฎฉไฝ ๅฎ‰่ฃ…ๅ’Œๅญฆไน  Python 3ใ€‚ไฝ ๅบ”่ฏฅๅ‘Š่ฏ‰ไป–ไปฌโ€œ็ญ‰ไฝ ็”ต่„‘้‡Œ็š„ๆ‰€ๆœ‰\\\npython ไปฃ็ ้ƒฝๆ”ฏๆŒ Python 3 ไบ†๏ผŒๆˆ‘ๅ†่ฏ•็€ๅญฆๅญฆๅงใ€‚โ€ไฝ ่ฟ™ๅฅ่ฏ่ถณๅคŸไป–ไปฌๅฟ™ๆดปไธชๅๆฅๅนด็š„ไบ†ใ€‚\n\nๆ€ปๆœ‰ไธ€ๅคฉไฝ ไผšๅฌๅˆฐๆœ‰็จ‹ๅบๅ‘˜ๅปบ่ฎฎไฝ ไฝฟ็”จ Mac OSX ๆˆ–่€… Linuxใ€‚ๅฆ‚ๆžœไป–ๅ–œๆฌขๅญ—ไฝ“็พŽ่ง‚๏ผŒไป–\\\nไผšๅ‘Š่ฏ‰ไฝ ่ฎฉไฝ ๅผ„ๅฐ Mac OSX ่ฎก็ฎ—ๆœบ๏ผŒๅฆ‚ๆžœไป–ไปฌๅ–œๆฌขๆ“ไฝœๆŽงๅˆถ่€Œไธ”็•™ไบ†ไธ€้ƒจๅคง่ƒกๅญ๏ผŒไป–ไผš\\\n่ฎฉไฝ ๅฎ‰่ฃ… Linuxใ€‚่ฟ™้‡Œๅ†ๆฌกๅ‘ไฝ ่ฏดๆ˜Ž๏ผŒๅช่ฆๆ˜ฏไธ€ๅฐๆ‰‹ไธŠ่ƒฝ็”จ็š„็”ต่„‘ๅฐฑๅฏไปฅไบ†ใ€‚ไฝ ้œ€่ฆ็š„ๅช\\\nๆœ‰ไธ‰ๆ ทไธœ่ฅฟ: ``gedit``\\ใ€ไธ€ไธชๅ‘ฝไปค่กŒ็ปˆ็ซฏใ€่ฟ˜ๆœ‰ ``python``\\ใ€‚\n\nๆœ€ๅŽ่ฆ่ฏด็š„ๆ˜ฏ่ฟ™่Š‚็ปƒไน ็š„ๅ‡†ๅค‡ๅทฅไฝœ็š„็›ฎ็š„๏ผŒไนŸๅฐฑๆ˜ฏ่ฎฉไฝ ๅฏไปฅๅœจไปฅๅŽ็š„็ปƒไน ไธญ้กบๅˆฉๅœฐๅšๅˆฐไธ‹้ข็š„่ฟ™ไบ›ไบ‹ๆƒ…๏ผš\n\n1. ไฝฟ็”จ ``gedit`` ็ผ–ๅ†™ไปฃ็ ใ€‚\n2. ่ฟ่กŒไฝ ๅ†™็š„ไน ้ข˜ใ€‚\n3. ไฟฎๆ”น้”™่ฏฏ็š„ไน ้ข˜ใ€‚\n4. 
้‡ๅคไธŠ่ฟฐๆญฅ้ชคใ€‚\n\nๅ…ถไป–็š„ไบ‹ๆƒ…ๅชไผš่ฎฉไฝ ๆ›ดๅ›ฐๆƒ‘๏ผŒๆ‰€ไปฅ่ฟ˜ๆ˜ฏๅšๆŒๆŒ‰่ฎกๅˆ’่ฟ›่กŒๅงใ€‚\n\n" }, { "alpha_fraction": 0.6611872315406799, "alphanum_fraction": 0.6767123341560364, "avg_line_length": 22.537633895874023, "blob_id": "a73b9802502f5b28978e9c5aa83ce6645e476a4f", "content_id": "c4cdf30601a3582465ec4e1d739907ab389464e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4534, "license_type": "no_license", "max_line_length": 59, "num_lines": 93, "path": "/cn/ex41.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 41: ๆฅ่‡ช Percal 25 ๅท่กŒๆ˜Ÿ็š„ๅ“ฅ้กฟไบบ(Gothons)\n***********************************************************\n\nไฝ ๅœจไธŠไธ€่Š‚ไธญๅ‘็Žฐ dict ็š„็ง˜ๅฏ†ๅŠŸ่ƒฝไบ†ๅ—๏ผŸไฝ ๅฏไปฅ่งฃ้‡Š็ป™่‡ชๅทฑๅ—๏ผŸ่ฎฉๆˆ‘ๆฅ็ป™ไฝ \\\n่งฃ้‡Šไธ€ไธ‹๏ผŒ้กบไพฟๅ’Œไฝ ่‡ชๅทฑ็š„็†่งฃๅฏนๆฏ”็œ‹ๆœ‰ไป€ไนˆไธๅŒใ€‚่ฟ™้‡Œๆ˜ฏๆˆ‘ไปฌ่ฆ่ฎจ่ฎบ็š„ไปฃ็ ๏ผš\n\n.. code-block:: python\n \n cities['_find'] = find_city\n city_found = cities['_find'](cities, state)\n\nไฝ ่ฆ่ฎฐไฝไธ€ไธชๅ‡ฝๆ•ฐไนŸๅฏไปฅไฝœไธบไธ€ไธชๅ˜้‡๏ผŒ\\``def find_city`` ๆฏ”ๅฆ‚่ฟ™ไธ€ๅฅๅˆ›ๅปบ\\\nไบ†ไธ€ไธชไฝ ๅฏไปฅๅœจไปปไฝ•ๅœฐๆ–น้ƒฝ่ƒฝไฝฟ็”จ็š„ๅ˜้‡ใ€‚ๅœจ่ฟ™ๆฎตไปฃ็ ้‡Œ๏ผŒๆˆ‘ไปฌ้ฆ–ๅ…ˆๆŠŠๅ‡ฝๆ•ฐ\n``find_city`` ๆ”พๅˆฐๅซๅš ``cities`` ็š„ๅญ—ๅ…ธไธญ๏ผŒๅนถๅฐ†ๅ…ถๆ ‡่ฎฐไธบ ``'_find'``\\ใ€‚\n่ฟ™ๅ’Œๆˆ‘ไปฌๅฐ†ๅทžๅ’Œๅธ‚ๅ…ณ่”่ตทๆฅ็š„ไปฃ็ ๅš็š„ไบ‹ๆƒ…ไธ€ๆ ท๏ผŒๅชไธ่ฟ‡ๆˆ‘ไปฌๅœจ่ฟ™้‡Œๆ”พไบ†ไธ€ไธช\\\nๅ‡ฝๆ•ฐ็š„ๅ็งฐใ€‚\n\nๅฅฝไบ†๏ผŒๆ‰€ไปฅไธ€ๆ—ฆๆˆ‘ไปฌ็Ÿฅ้“ ``find_city`` ๆ˜ฏๅœจๅญ—ๅ…ธไธญ ``_find`` ็š„ไฝ็ฝฎ๏ผŒ่ฟ™ๅฐฑ\\\nๆ„ๅ‘ณ็€ๆˆ‘ไปฌๅฏไปฅๅŽป่ฐƒ็”จๅฎƒใ€‚็ฌฌไบŒ่กŒไปฃ็ ๅฏไปฅๅˆ†่งฃๆˆๅฆ‚ไธ‹ๆญฅ้ชค๏ผš\n\n1. Python ็œ‹ๅˆฐ ``city_found =`` ไบŽๆ˜ฏ็Ÿฅ้“ไบ†้œ€่ฆๅˆ›ๅปบไธ€ไธชๅ˜้‡ใ€‚\n2. ็„ถๅŽๅฎƒ่ฏปๅˆฐ ``cities`` ๏ผŒ็„ถๅŽ็Ÿฅ้“ไบ†ๅฎƒๆ˜ฏไธ€ไธชๅญ—ๅ…ธ\n3. ็„ถๅŽ็œ‹ๅˆฐไบ† ``['_find']`` ๏ผŒไบŽๆ˜ฏ Python ๅฐฑไปŽ็ดขๅผ•ๆ‰พๅˆฐไบ†ๅญ—ๅ…ธ ``cities`` \n ไธญๅฏนๅบ”็š„ไฝ็ฝฎ๏ผŒๅนถไธ”่Žทๅ–ไบ†่ฏฅไฝ็ฝฎ็š„ๅ†…ๅฎนใ€‚\n4. 
``['_find']`` ่ฟ™ไธชไฝ็ฝฎ็š„ๅ†…ๅฎนๆ˜ฏๆˆ‘ไปฌ็š„ๅ‡ฝๆ•ฐ ``find_city`` ๏ผŒๆ‰€ไปฅ Python\n ๅฐฑ็Ÿฅ้“ไบ†่ฟ™้‡Œ่กจ็คบไธ€ไธชๅ‡ฝๆ•ฐ๏ผŒไบŽๆ˜ฏๅฝ“ๅฎƒ็ขฐๅˆฐ ``(`` ๅฐฑๅผ€ๅง‹ไบ†ๅ‡ฝๆ•ฐ่ฐƒ็”จใ€‚\n5. ``cities, state`` ่ฟ™ไธคไธชๅ‚ๆ•ฐๅฐ†่ขซไผ ้€’ๅˆฐๅ‡ฝๆ•ฐ ``find_city`` ไธญ๏ผŒ็„ถๅŽ่ฟ™ไธช\\\n ๅ‡ฝๆ•ฐๅฐฑ่ขซ่ฟ่กŒไบ†ใ€‚\n6. ``find_city`` ๆŽฅ็€ไปŽ ``cities`` ไธญๅฏปๆ‰พ ``states`` ๏ผŒๅนถไธ”่ฟ”ๅ›žๅฎƒๆ‰พๅˆฐ็š„\\\n ๅ†…ๅฎน๏ผŒๅฆ‚ๆžœไป€ไนˆ้ƒฝๆฒกๆ‰พๅˆฐ๏ผŒๅฐฑ่ฟ”ๅ›žไธ€ไธชไฟกๆฏ่ฏดๅฎƒไป€ไนˆ้ƒฝๆฒกๆ‰พๅˆฐใ€‚\n7. Python ``find_city`` ๆŽฅๅ—่ฟ”ๅ›ž็š„ไฟกๆฏ๏ผŒๆœ€ๅŽๅฐ†่ฏฅไฟกๆฏ่ต‹ๅ€ผ็ป™ไธ€ๅผ€ๅง‹็š„\n ``city_found`` ่ฟ™ไธชๅ˜้‡ใ€‚\n\nๆˆ‘ๅ†ๆ•™ไฝ ไธ€ไธชๅฐๆŠ€ๅทงใ€‚ๅฆ‚ๆžœไฝ ๅ€’็€้˜…่ฏป็š„่ฏ๏ผŒไปฃ็ ๅฏ่ƒฝไผšๅ˜ๅพ—ๆ›ดๅฎนๆ˜“็†่งฃใ€‚่ฎฉๆˆ‘ไปฌ\\\nๆฅ่ฏ•ไธ€ไธ‹๏ผŒไธ€ๆ ทๆ˜ฏ้‚ฃ่กŒ๏ผš\n\n1. ``state`` ๅ’Œ ``city`` ๆ˜ฏ...\n2. ไฝœไธบๅ‚ๆ•ฐไผ ้€’็ป™...\n3. ไธ€ไธชๅ‡ฝๆ•ฐ๏ผŒไฝ็ฝฎๅœจ...\n4. ``'_find'`` ็„ถๅŽๅฏปๆ‰พ๏ผŒ็›ฎ็š„ๅœฐไธบ...\n5. ``cities`` ่ฟ™ไธชไฝ็ฝฎ...\n6. ๆœ€ๅŽ่ต‹ๅ€ผ็ป™ ``city_found``.\n\n่ฟ˜ๆœ‰ไธ€็งๆ–นๆณ•่ฏปๅฎƒ๏ผŒ่ฟ™ๅ›žๆ˜ฏโ€œ็”ฑ้‡Œๅ‘ๅค–โ€ใ€‚\n\n1. ๆ‰พๅˆฐ่กจ่พพๅผ็š„ไธญๅฟƒไฝ็ฝฎ๏ผŒๆญคๆฌกไธบ ``['_find']``.\n2. ้€†ๆ—ถ้’ˆ่ฟฝๆบฏ๏ผŒ้ฆ–ๅ…ˆ็œ‹ๅˆฐ็š„ๆ˜ฏไธ€ไธชๅซ ``cities`` ็š„ๅญ—ๅ…ธ๏ผŒ่ฟ™ๆ ทๅฐฑ็Ÿฅ้“ไบ† cities ไธญ็š„\n ``_find`` ๅ…ƒ็ด ใ€‚\n3. ไธŠไธ€ๆญฅๅพ—ๅˆฐไธ€ไธชๅ‡ฝๆ•ฐใ€‚็ปง็ปญ้€†ๆ—ถ้’ˆๅฏปๆ‰พ๏ผŒ็œ‹ๅˆฐ็š„ๆ˜ฏๅ‚ๆ•ฐใ€‚\n4. ๅ‚ๆ•ฐไผ ้€’็ป™ๅ‡ฝๆ•ฐๅŽ๏ผŒๅ‡ฝๆ•ฐไผš่ฟ”ๅ›žไธ€ไธชๅ€ผใ€‚็„ถๅŽๅ†้€†ๆ—ถ้’ˆๅฏปๆ‰พใ€‚\n5. ๆœ€ๅŽ๏ผŒๆˆ‘ไปฌๅˆฐไบ† ``city_found =`` ็š„่ต‹ๅ€ผไฝ็ฝฎ๏ผŒๅนถไธ”ๅพ—ๅˆฐไบ†ๆœ€็ปˆ็ป“ๆžœใ€‚\n\nๆ•ฐๅๅนด็š„็ผ–็จ‹ไธ‹ๆฅ๏ผŒๆˆ‘ๅœจ่ฏปไปฃ็ ็š„่ฟ‡็จ‹ไธญๅทฒ็ป็”จไธๅˆฐไธŠ้ข็š„ไธ‰็งๆ–นๆณ•ไบ†ใ€‚ๆˆ‘ๅช่ฆ็žŸไธ€็œผ\\\nๅฐฑ่ƒฝ็Ÿฅ้“ๅฎƒ็š„ๆ„ๆ€ใ€‚็”š่‡ณ็ป™ๆˆ‘ไธ€ๆ•ด้กต็š„ไปฃ็ ๏ผŒๆˆ‘ไนŸๅฏไปฅไธ€็œผ็ž„ๅ‡บ้‡Œ่พน็š„ bug ๅ’Œ้”™่ฏฏใ€‚่ฟ™ๆ ท\\\n็š„ๆŠ€่ƒฝๆ˜ฏ่Šฑไบ†่ถ…ไนŽๅธธไบบ็š„ๆ—ถ้—ดๅ’Œ็ฒพๅŠ›ๆ‰้”ป็‚ผๅพ—ๆฅ็š„ใ€‚ๅœจ็ฃจ็ปƒ็š„่ฟ‡็จ‹ไธญ๏ผŒๆˆ‘ๅญฆไผšไบ†ไธ‹้ข\\\nไธ‰็ง่ฏปไปฃ็ ็š„ๆ–นๆณ•๏ผŒๅฎƒไปฌๆ˜ฏ็”จๆˆทๅ‡ ไนŽๆ‰€ๆœ‰็š„็ผ–็จ‹่ฏญ่จ€๏ผš\n\n1. ไปŽๅ‰ๅ‘ๅŽใ€‚\n2. ไปŽๅŽๅ‘ๅ‰ใ€‚\n3. 
้€†ๆ—ถ้’ˆๆ–นๅ‘ใ€‚\n\nไธ‹ๆฌก็ขฐๅˆฐ้šพๆ‡‚็š„่ฏญๅฅๆ—ถ๏ผŒไฝ ๅฏไปฅ่ฏ•่ฏ•่ฟ™ไธ‰็งๆ–นๆณ•ใ€‚\n\n็Žฐๅœจๆˆ‘ไปฌๆฅๅ†™่ฟ™ๆฌก็š„็ปƒไน ๏ผŒๅ†™ๅฎŒๅŽๅ†่ฟ‡ไธ€้๏ผŒ่ฟ™่Š‚ไน ้ข˜ๅ…ถๅฎžๆŒบๆœ‰่ถฃ็š„ใ€‚\n\n.. literalinclude:: ex/ex41.py\n :linenos:\n\nไปฃ็ ไธๅฐ‘๏ผŒไธ่ฟ‡่ฟ˜ๆ˜ฏไปŽๅคดๅ†™ๅฎŒๅงใ€‚็กฎ่ฎคๅฎƒ่ƒฝ่ฟ่กŒ๏ผŒ็„ถๅŽ็Žฉไธ€ไธ‹็œ‹็œ‹ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๆˆ‘็Žฉ่ตทๆฅๆ—ถ่ฟ™ๆ ท็š„๏ผš\n\n.. literalinclude:: ex/ex41.txt\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ่งฃ้‡Šไธ€ไธ‹่ฟ”ๅ›ž่‡ณไธ‹ไธ€ไธชๆˆฟ้—ด็š„ๅทฅไฝœๅŽŸ็†ใ€‚\n2. ๅˆ›ๅปบๆ›ดๅคš็š„ๆˆฟ้—ด๏ผŒ่ฎฉๆธธๆˆ่ง„ๆจกๅ˜ๅคงใ€‚\n3. ้™คไบ†่ฎฉๆฏไธชๅ‡ฝๆ•ฐๆ‰“ๅฐ่‡ชๅทฑไปฅๅค–๏ผŒ่ฏ•่ฏ•ๅญฆไน ไธ€ไธ‹โ€œๆ–‡ๆกฃๆณจ่งฃ(doc comments)โ€ใ€‚\\\n ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝๅฐ†ๆˆฟ้—ดๆ่ฟฐๅ†™ๆˆๆ–‡ๆกฃๆณจ่งฃ๏ผŒ็„ถๅŽไฟฎๆ”น่ฟ่กŒๅฎƒ็š„ไปฃ็ ๏ผŒ่ฎฉๅฎƒๆŠŠๆ–‡ๆกฃ\\\n ๆณจ่งฃๆ‰“ๅฐๅ‡บๆฅใ€‚\n4. ไธ€ๆ—ฆไฝ ็”จไบ†ๆ–‡ๆกฃๆณจ่งฃไฝœไธบๆˆฟ้—ดๆ่ฟฐ๏ผŒไฝ ่ฟ˜้œ€่ฆ่ฎฉ่ฟ™ไธชๅ‡ฝๆ•ฐๆ‰“ๅฐๅ‡บ็”จๆˆทๆ็คบๅ—๏ผŸ\\\n ่ฏ•็€่ฎฉ่ฟ่กŒๅ‡ฝๆ•ฐ็š„ไปฃ็ ๆ‰“ๅ‡บ็”จๆˆทๆ็คบๆฅ๏ผŒ็„ถๅŽๅฐ†็”จๆˆท่พ“ๅ…ฅไผ ้€’ๅˆฐๅ„ไธชๅ‡ฝๆ•ฐใ€‚\\\n ไฝ ็š„ๅ‡ฝๆ•ฐๅบ”่ฏฅๅชๆ˜ฏไธ€ไบ› if ่ฏญๅฅ็ป„ๅˆ๏ผŒๅฐ†็ป“ๆžœๆ‰“ๅฐๅ‡บๆฅ๏ผŒๅนถไธ”่ฟ”ๅ›žไธ‹ไธ€ไธชๆˆฟ้—ดใ€‚\n5. 
่ฟ™ๅ…ถๅฎžๆ˜ฏไธ€ไธชๅฐ็‰ˆๆœฌ็š„โ€œๆœ‰้™็Šถๆ€ๆœบ(finite state machine)โ€๏ผŒๆ‰พ่ต„ๆ–™\\\n ้˜…่ฏปไบ†่งฃไธ€ไธ‹๏ผŒ่™ฝ็„ถไฝ ๅฏ่ƒฝ็œ‹ไธๆ‡‚๏ผŒไฝ†่ฟ˜ๆ˜ฏๆ‰พๆฅ็œ‹็œ‹ๅงใ€‚\n\n" }, { "alpha_fraction": 0.6857585310935974, "alphanum_fraction": 0.693498432636261, "avg_line_length": 30.487804412841797, "blob_id": "ade0e6ce7b9af8e75cc7335623ce52494cdaa9c3", "content_id": "6e24ff31263604e5c829affcd4751095a90419ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1292, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/ex14.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 14: Prompting And Passing\n**********************************\n\nLet's do one exercise that uses ``argv`` and ``raw_input`` together to\nask the user something specific. You will need this for the next exercise\nwhere we learn to read and write files. In this exercise we'll use\n``raw_input`` slightly differently by having it just print a simple ``>``\nprompt. This is similar to a game like Zork or Adventure.\n\n\n.. literalinclude:: ex/ex14.py\n :linenos:\n\n\nNotice though that we make a variable ``prompt`` that is set to the prompt we\nwant, and we give that to ``raw_input`` instead of typing it over and\nover. Now if we want to make the prompt something else, we just \nchange it in this one spot and rerun the script.\n\nVery handy.\n\n\nWhat You Should See\n===================\n\nWhen you run this, remember that you have to give the script your\nname for the ``argv`` arguments.\n\n.. literalinclude:: ex/ex14.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Find out what Zork and Adventure were.\n Try to find a copy and play it.\n2. Change the ``prompt`` variable to something else entirely.\n3. Add another argument and use it in your script.\n4. 
Make sure you understand how I combined a ``\"\"\"`` style multi-line string\n with the ``%`` format activator as the last print.\n\n" }, { "alpha_fraction": 0.707911491394043, "alphanum_fraction": 0.71466064453125, "avg_line_length": 42, "blob_id": "2a029d010c3dac840679c46b8757160b9a5c1d2e", "content_id": "beb944ff670191a8f32e61f5e60d0eaf1767bffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2667, "license_type": "no_license", "max_line_length": 155, "num_lines": 62, "path": "/ex33.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 33: While Loops\n************************\n\nNow to totally blow your mind with a new loop, the ``while-loop``. \nA ``while-loop`` will keep executing the code block under it as long as\na boolean expression is ``True``.\n\nWait, you have been keeping up with the terminology right? That if we write a \nline and end it with a ``:`` (colon) then that tells Python to start a new \nblock of code? Then we indent and that's the new code. This is all about\nstructuring your programs so that Python knows what you mean. If you do not\nget that idea then go back and do some more work with ``if-statements``, \nfunctions, and the ``for-loop`` until you get it.\n\nLater on we'll have some exercises that will train your brain to read these\nstructures, similar to how we burned boolean expressions into your brain.\n\nBack to ``while-loops``. What they do is simply do a test like an\n``if-statement``, but instead of running the code block *once*, they jump back\nto the \"top\" where the ``while`` is, and repeat. It keeps doing this until the\nexpression is ``False``.\n\nHere's the problem with ``while-loops``: Sometimes they do not stop. This is\ngreat if your intention is to just keep looping until the end of the universe.\nOtherwise you almost always want your loops to end eventually.\n\nTo avoid these problems, there's some rules to follow:\n\n1. 
Make sure that you use ``while-loops`` sparingly. Usually a ``for-loop`` is\n better.\n2. Review your while statements and make sure that the thing you are testing will\n become ``False`` at some point.\n3. When in doubt, print out your test variable at the top and bottom of the ``while-loop``\n to see what it's doing.\n\nIn this exercise, you will learn the ``while-loop`` by doing the above three things:\n\n.. literalinclude:: ex/ex33.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex33.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Convert this while loop to a function that you can call, and\n replace ``6`` in the test (``i < 6``) with a variable.\n2. Now use this function to rewrite the script to try different numbers.\n3. Add another variable to the function arguments that you can pass in that\n lets you change the ``+ 1`` on line 8 so you can change how much it \n increments by.\n4. Rewrite the script again to use this function to see what effect that has.\n5. Now, write it to use ``for-loops`` and ``range`` instead. Do you need the incrementor in the middle anymore? 
What happens if you do not get rid of it?\n\nIf at any time that you are doing this it goes crazy (it probably will), just\nhold down ``CTRL`` and hit ``c`` (CTRL-c) and the program will abort.\n\n" }, { "alpha_fraction": 0.6505681872367859, "alphanum_fraction": 0.6576704382896423, "avg_line_length": 30.977272033691406, "blob_id": "9dd0fc9106ad2b71bec0bffcba25e6c1ebef1e44", "content_id": "10aec37c2ef2cb460478e91cc9076ed6ec0e8d91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1408, "license_type": "no_license", "max_line_length": 93, "num_lines": 44, "path": "/ex12.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 12: Prompting People\n*****************************\n\nWhen you typed ``raw_input()`` you were typing the ``(`` and ``)`` characters\nwhich are ``parenthesis``. This is similar to when you used them to do a\nformat with extra variables, as in ``\"%s %s\" % (x, y)``. For ``raw_input`` you\ncan also put in a prompt to show to a person so they know what to type. Put a\nstring that you want for the prompt inside the ``()`` so that it looks like\nthis:\n\n.. code-block:: python\n\n y = raw_input(\"Name? \")\n\nThis prompts the user with \"Name?\" and puts the result into the variable ``y``.\nThis is how you ask someone a question and get their answer.\n\nThis means we can completely rewrite our previous exercise using just\n``raw_input`` to do all the prompting.\n\n\n.. literalinclude:: ex/ex12.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n\n.. literalinclude:: ex/ex12.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. In Terminal where you normally run ``python`` to run your scripts, \n type: ``pydoc raw_input``. Read what it says. If you're on Windows try\n ``python -m pydoc raw_input`` instead.\n2. Get out of pydoc by typing ``q`` to quit.\n3. Go look online for what the ``pydoc`` command does.\n4. 
Use pydoc to also read about ``open``, ``file``, ``os``, and ``sys``. It's alright if you\n do not understand those, just read through and take notes about interesting\n things.\n\n" }, { "alpha_fraction": 0.6694630980491638, "alphanum_fraction": 0.681208074092865, "avg_line_length": 23.85416603088379, "blob_id": "bc2bac4d1b630ffef7585ab1ba07fb7be740a76e", "content_id": "f5f8fcababa648f6bf33de70cb105cad4eefdd6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2644, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/cn/ex21.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 21: ๅ‡ฝๆ•ฐๅฏไปฅ่ฟ”ๅ›žไธœ่ฅฟ\n*******************************************\n\nไฝ ๅทฒ็ปๅญฆ่ฟ‡ไฝฟ็”จ ``=`` ็ป™ๅ˜้‡ๅ‘ฝๅ๏ผŒไปฅๅŠๅฐ†ๅ˜้‡ๅฎšไน‰ไธบๆŸไธชๆ•ฐๅญ—ๆˆ–่€…ๅญ—็ฌฆไธฒใ€‚ๆŽฅไธ‹ๆฅๆˆ‘ไปฌ\\\nๅฐ†่ฎฉไฝ ่ง่ฏๆ›ดๅคšๅฅ‡่ฟนใ€‚ๆˆ‘ไปฌ่ฆๆผ”็คบ็ป™ไฝ ็š„ๆ˜ฏๅฆ‚ไฝ•ไฝฟ็”จ ``=`` ไปฅๅŠไธ€ไธชๆ–ฐ็š„ Python ่ฏๆฑ‡\\\n``return`` ๆฅๅฐ†ๅ˜้‡่ฎพ็ฝฎไธบโ€œไธ€ไธชๅ‡ฝๆ•ฐ็š„ๅ€ผโ€ใ€‚ๆœ‰ไธ€็‚นไฝ ้œ€่ฆๅŠๅ…ถๆณจๆ„๏ผŒไธ่ฟ‡ๆˆ‘ไปฌๆš‚ไธ”ไธ่ฎฒ๏ผŒ\\\nๅ…ˆๆ’ฐๅ†™ไธ‹้ข็š„่„šๆœฌๅง๏ผš\n\n.. literalinclude:: ex/ex21.py\n :linenos:\n\n็Žฐๅœจๆˆ‘ไปฌๅˆ›ๅปบไบ†ๆˆ‘ไปฌ่‡ชๅทฑ็š„ๅŠ ๅ‡ไน˜้™คๆ•ฐๅญฆๅ‡ฝๆ•ฐ๏ผš ``add``, ``subtract``, ``multiply``, \nไปฅๅŠ ``divide``\\ใ€‚้‡่ฆ็š„ๆ˜ฏๅ‡ฝๆ•ฐ็š„ๆœ€ๅŽไธ€่กŒ๏ผŒไพ‹ๅฆ‚ ``add`` ็š„ๆœ€ๅŽไธ€่กŒๆ˜ฏ ``return a + b``\\๏ผŒ\\\nๅฎƒๅฎž็Žฐ็š„ๅŠŸ่ƒฝๆ˜ฏ่ฟ™ๆ ท็š„๏ผš\n\n1. ๆˆ‘ไปฌ่ฐƒ็”จๅ‡ฝๆ•ฐๆ—ถไฝฟ็”จไบ†ไธคไธชๅ‚ๆ•ฐ๏ผš ``a`` ๅ’Œ ``b`` ใ€‚\n2. ๆˆ‘ไปฌๆ‰“ๅฐๅ‡บ่ฟ™ไธชๅ‡ฝๆ•ฐ็š„ๅŠŸ่ƒฝ๏ผŒ่ฟ™้‡Œๅฐฑๆ˜ฏ่ฎก็ฎ—ๅŠ ๆณ•๏ผˆadding๏ผ‰\n3. ๆŽฅไธ‹ๆฅๆˆ‘ไปฌๅ‘Š่ฏ‰ Python ่ฎฉๅฎƒๅšๆŸไธชๅ›žไผ ็š„ๅŠจไฝœ๏ผšๆˆ‘ไปฌๅฐ† ``a + b`` ็š„ๅ€ผ่ฟ”ๅ›ž(return)ใ€‚\\\n ๆˆ–่€…ไฝ ๅฏไปฅ่ฟ™ไนˆ่ฏด๏ผšโ€œๆˆ‘ๅฐ† ``a`` ๅ’Œ ``b`` ๅŠ ่ตทๆฅ๏ผŒๅ†ๆŠŠ็ป“ๆžœ่ฟ”ๅ›žใ€‚โ€\n4. 
Python ๅฐ†ไธคไธชๆ•ฐๅญ—็›ธๅŠ ๏ผŒ็„ถๅŽๅฝ“ๅ‡ฝๆ•ฐ็ป“ๆŸ็š„ๆ—ถๅ€™๏ผŒๅฎƒๅฐฑๅฏไปฅๅฐ† ``a + b`` ็š„็ป“ๆžœ่ต‹ไบˆ\\\n ไธ€ไธชๅ˜้‡ใ€‚\n \nๅ’Œๆœฌไนฆ้‡Œ็š„ๅพˆๅคšๅ…ถไป–ไธœ่ฅฟไธ€ๆ ท๏ผŒไฝ ่ฆๆ…ขๆ…ขๆถˆๅŒ–่ฟ™ไบ›ๅ†…ๅฎน๏ผŒไธ€ๆญฅไธ€ๆญฅๆ‰ง่กŒไธ‹ๅŽป๏ผŒ่ฟฝ่ธชไธ€ไธ‹็ฉถ็ซŸ\\\nๅ‘็”Ÿไบ†ไป€ไนˆใ€‚ไธบไบ†ๅธฎๅŠฉไฝ ็†่งฃ๏ผŒๆœฌ่Š‚็š„ๅŠ ๅˆ†ไน ้ข˜ๅฐ†่ฎฉไฝ ่งฃๅ†ณไธ€ไธช่ฟท้ข˜๏ผŒๅนถไธ”่ฎฉไฝ ๅญฆๅˆฐ็‚นๆฏ”่พƒ\\\n้…ท็š„ไธœ่ฅฟใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n\n.. literalinclude:: ex/ex21.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅฆ‚ๆžœไฝ ไธๆ˜ฏๅพˆ็กฎๅฎš ``return`` ็š„ๅŠŸ่ƒฝ๏ผŒ่ฏ•็€่‡ชๅทฑๅ†™ๅ‡ ไธชๅ‡ฝๆ•ฐๅ‡บๆฅ๏ผŒ่ฎฉๅฎƒไปฌ่ฟ”ๅ›žไธ€ไบ›ๅ€ผใ€‚\\\n ไฝ ๅฏไปฅๅฐ†ไปปไฝ•ๅฏไปฅๆ”พๅœจ ``=`` ๅณ่พน็š„ไธœ่ฅฟไฝœไธบไธ€ไธชๅ‡ฝๆ•ฐ็š„่ฟ”ๅ›žๅ€ผใ€‚\n2. ่ฟ™ไธช่„šๆœฌ็š„็ป“ๅฐพๆ˜ฏไธ€ไธช่ฟท้ข˜ใ€‚ๆˆ‘ๅฐ†ไธ€ไธชๅ‡ฝๆ•ฐ็š„่ฟ”ๅ›žๅ€ผ็”จไฝœไบ†ๅฆๅค–ไธ€ไธชๅ‡ฝๆ•ฐ็š„ๅ‚ๆ•ฐใ€‚ๆˆ‘ๅฐ†\\\n ๅฎƒไปฌ้“พๆŽฅๅˆฐไบ†ไธ€่ตท๏ผŒๅฐฑ่ทŸๅ†™ๆ•ฐๅญฆ็ญ‰ๅผไธ€ๆ ทใ€‚่ฟ™ๆ ทๅฏ่ƒฝๆœ‰ไบ›้šพ่ฏป๏ผŒไธ่ฟ‡่ฟ่กŒไธ€ไธ‹ไฝ ๅฐฑ็Ÿฅ้“็ป“ๆžœ\\\n ไบ†ใ€‚ๆŽฅไธ‹ๆฅ๏ผŒไฝ ้œ€่ฆ่ฏ•่ฏ•็œ‹่ƒฝไธ่ƒฝ็”จๆญฃๅธธ็š„ๆ–นๆณ•ๅฎž็Žฐๅ’Œ่ฟ™ไธช่กจ่พพๅผไธ€ๆ ท็š„ๅŠŸ่ƒฝใ€‚\n3. ไธ€ๆ—ฆไฝ ่งฃๅ†ณไบ†่ฟ™ไธช่ฟท้ข˜๏ผŒ่ฏ•็€ไฟฎๆ”นไธ€ไธ‹ๅ‡ฝๆ•ฐ้‡Œ็š„ๆŸไบ›้ƒจๅˆ†๏ผŒ็„ถๅŽ็œ‹ไผšๆœ‰ไป€ไนˆๆ ท็š„็ป“ๆžœใ€‚\\\n ไฝ ๅฏไปฅๆœ‰็›ฎ็š„ๅœฐไฟฎๆ”นๅฎƒ๏ผŒ่ฎฉๅฎƒ่พ“ๅ‡บๅฆๅค–ไธ€ไธชๅ€ผใ€‚\n4. 
ๆœ€ๅŽ๏ผŒ้ข ๅ€’่ฟ‡ๆฅๅšไธ€ๆฌกใ€‚ๅ†™ไธ€ไธช็ฎ€ๅ•็š„็ญ‰ๅผ๏ผŒไฝฟ็”จไธ€ๆ ท็š„ๅ‡ฝๆ•ฐๆฅ่ฎก็ฎ—ๅฎƒใ€‚\n\n่ฟ™ไธชไน ้ข˜ๅฏ่ƒฝไผš่ฎฉไฝ ๆœ‰ไบ›ๅคดๅคง๏ผŒไธ่ฟ‡่ฟ˜ๆ˜ฏๆ…ขๆ…ขๆฅ๏ผŒๆŠŠๅฎƒๅฝ“ๅšไธ€ไธชๆธธๆˆ๏ผŒ่งฃๅ†ณ่ฟ™ๆ ท็š„่ฟท้ข˜ๆญฃๆ˜ฏ\\\n็ผ–็จ‹็š„ไน่ถฃไน‹ไธ€ใ€‚ๅŽ้ขไฝ ่ฟ˜ไผš็œ‹ๅˆฐ็ฑปไผผ็š„ๅฐ่ฐœ้ข˜ใ€‚" }, { "alpha_fraction": 0.6749116778373718, "alphanum_fraction": 0.684628963470459, "avg_line_length": 35.45161437988281, "blob_id": "cce111f4f41fc70bc85844dfd98c3e54b7cf399c", "content_id": "3762b3c30f057157acec8243c27e0c29bc2688da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1132, "license_type": "no_license", "max_line_length": 213, "num_lines": 31, "path": "/ex20.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 20: Functions And Files\n********************************\n\nRemember your checklist for functions, then do this exercise \npaying close attention to how functions and files can work together\nto make useful stuff.\n\n.. literalinclude:: ex/ex20.py\n :linenos:\n\nPay close attention to how we pass in the current line number each time\nwe run ``print_a_line``.\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex20.txt\n :language: console\n \n\n\nExtra Credit\n============\n\n1. Go through and write English comments for each line to understand what's\n going on.\n2. Each time ``print_a_line`` is run you are passing in a variable ``current_line``. Write out what ``current_line`` is equal to on each function call, and trace how it becomes ``line_count`` in ``print_a_line``.\n3. Find each place a function is used, and go check its ``def`` to make sure that you are giving it the right arguments.\n4. Research online what the ``seek`` function for ``file`` does. Try ``pydoc file`` \n and see if you can figure it out from there.\n5. 
Research the shorthand notation ``+=`` and rewrite the script to use that.\n\n\n" }, { "alpha_fraction": 0.7238855361938477, "alphanum_fraction": 0.7278775572776794, "avg_line_length": 35.63414764404297, "blob_id": "eb0c6ffdb1fd2d2b78a676b300ab85b52f577ed2", "content_id": "6520cb851913058218f35d6befb3f169c4cbbdee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1503, "license_type": "no_license", "max_line_length": 78, "num_lines": 41, "path": "/ex31.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 31: Making Decisions\n*****************************\n\nIn the first half of this book you mostly just printed out things and called \nfunctions, but everything was basically in a straight line. Your scripts ran\nstarting at the top, and went to the bottom where they ended. If you \nmade a function you could run that function later, but it still didn't\nhave the kind of branching you need to really make decisions. Now that\nyou have ``if``, ``else``, and ``elif`` you can start to make scripts that\ndecide things.\n\nIn the last script you wrote out a simple set of tests asking some questions.\nIn this script you will ask the user questions and make decisions based\non their answers. Write this script, and then play with it quite a lot\nto figure it out.\n\n.. literalinclude:: ex/ex31.py\n :linenos:\n\nA key point here is that you are now putting the ``if-statements`` *inside*\n``if-statements`` as code that can run. This is very powerful and can be used\nto create \"nested\" decisions, where one branch leads to another and another.\n\nMake sure you understand this concept of if-statements inside\nif-statements. In fact, do the extra credit to really nail it.\n\n\nWhat You Should See\n===================\n\nHere is me playing this little adventure game. I do not do so well.\n\n.. 
literalinclude:: ex/ex31.txt\n :language: console\n\n\nExtra Credit\n============\n\nMake new parts of the game and change what decisions people can make. Expand\nthe game out as much as you can before it gets ridiculous.\n\n" }, { "alpha_fraction": 0.4419642984867096, "alphanum_fraction": 0.4598214328289032, "avg_line_length": 12.117647171020508, "blob_id": "c06e839851191c90bd83bfcb109eda17e987642e", "content_id": "460db7b2ad138f7ba66a8c8c9f4a8074959fbb80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 336, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/cn/ex9.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 9: ๆ‰“ๅฐ๏ผŒๆ‰“ๅฐ๏ผŒๆ‰“ๅฐ\n****************************************\n\n.. literalinclude:: ex/ex9.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex9.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ่‡ชๅทฑๆฃ€ๆŸฅ็ป“ๆžœ๏ผŒ่ฎฐๅฝ•ไฝ ็Šฏ่ฟ‡็š„้”™่ฏฏ๏ผŒๅนถไธ”ๅœจไธ‹ไธช็ปƒไน ไธญๅฐฝ้‡ไธ็ŠฏๅŒๆ ท็š„้”™่ฏฏใ€‚\n\n" }, { "alpha_fraction": 0.6531986594200134, "alphanum_fraction": 0.6604938507080078, "avg_line_length": 36.87234115600586, "blob_id": "ed9ac468e0a1163ae4396da9349df3be8648462d", "content_id": "576993ae9a2c3e15e40c393770417f65a871ab83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 105, "num_lines": 47, "path": "/ex3.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 3: Numbers And Math\n****************************\n\nEvery programming language has some kind of way of doing numbers and math.\nDo not worry, programmers lie frequently about being math geniuses when they\nreally aren't. 
If they were math geniuses, they would be doing math, not\nwriting ads and social network games to steal people's money.\n\nThis exercise has lots of math symbols. Let's name them right away so you know\nwhat they are called. As you type this one in, say the names. When saying them\nfeels boring you can stop saying them. Here are the names:\n\n* ``+`` plus\n* ``-`` minus\n* ``/`` slash\n* ``*`` asterisk\n* ``%`` percent\n* ``<`` less-than\n* ``>`` greater-than\n* ``<=`` less-than-equal\n* ``>=`` greater-than-equal\n\nNotice how the operations are missing? After you type in the code for this\nexercise, go back and figure out what each of these does and complete the\ntable. For example, ``+`` does addition.\n\n.. literalinclude:: ex/ex3.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex3.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Above each line, use the ``#`` to write a comment to yourself explaining what the line does.\n2. Remember in Exercise 0 when you started python? Start python this way \n again and using the above characters and what you know, use python as a calculator.\n3. Find something you need to calculate and write a new ``.py`` file that does it.\n4. Notice the math seems \"wrong\"? There are no fractions, only whole numbers.\n Find out why by researching what a \"floating point\" number is.\n5. 
Rewrite ``ex3.py`` to use floating point numbers so it's more accurate (hint: 20.0 is floating point).\n\n\n" }, { "alpha_fraction": 0.6838235259056091, "alphanum_fraction": 0.6930146813392639, "avg_line_length": 33.72340393066406, "blob_id": "55c66f800f5f96b8fb36447343b094e6e1c411fb", "content_id": "cc79cda3b7e79979e0cad8dd839beff73f713b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 83, "num_lines": 47, "path": "/ex11.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 11: Asking Questions\n*****************************\n\nNow it is time to pick up the pace. I have got you doing a lot of printing so\nthat you get used to typing simple things, but those simple things are fairly\nboring. What we want to do now is get data into your programs. This is a\nlittle tricky because you have learn to do two things that may not make sense right\naway, but trust me and do it anyway. It will make sense in a few exercises.\n\nMost of what software does is the following:\n\n1. Take some kind of input from a person.\n2. Change it.\n3. Print out something to show how it changed.\n\nSo far you have only been printing, but you haven't been able to get any input\nfrom a person, or change it. You may not even know what \"input\" means, so\nrather than talk about it, let's have you do some and see if you get it. Next\nexercise we'll do more to explain it.\n\n\n.. literalinclude:: ex/ex11.py\n :linenos:\n\n.. note::\n\n Notice that we put a ``,`` (comma) at the end of each ``print`` line.\n This is so that ``print`` doesn't end the line with a newline and go \n to the next line.\n\nWhat You Should See\n===================\n\n\n.. literalinclude:: ex/ex11.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Go online and find out what Python's ``raw_input`` does.\n2. Can you find other ways to use it? 
Try some of the samples you find.\n3. Write another \"form\" like this to ask some other questions.\n4. Related to escape sequences, try to find out why the last line has ``'6\\'2\"'``\n with that ``\\'`` sequence. See how the single-quote needs to be escaped\n because otherwise it would end the string?\n" }, { "alpha_fraction": 0.6506205201148987, "alphanum_fraction": 0.6694245934486389, "avg_line_length": 31.012048721313477, "blob_id": "17bf2819126b78fd3d949acf0bd9dea2ae61bbaf", "content_id": "6235bbffaea1f1c07514615331bedada559a0e24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4837, "license_type": "no_license", "max_line_length": 74, "num_lines": 83, "path": "/cn/ex39.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 39: ๅˆ—่กจ็š„ๆ“ไฝœ\n*******************************\n\nไฝ ๅทฒ็ปๅญฆ่ฟ‡ไบ†ๅˆ—่กจใ€‚ๅœจไฝ ๅญฆไน โ€œwhile ๅพช็Žฏโ€็š„ๆ—ถๅ€™๏ผŒไฝ ๅฏนๅˆ—่กจ่ฟ›่กŒ่ฟ‡โ€œ่ฟฝๅŠ (append)โ€\\\nๆ“ไฝœ๏ผŒ่€Œไธ”ๅฐ†ๅˆ—่กจ็š„ๅ†…ๅฎนๆ‰“ๅฐไบ†ๅ‡บๆฅใ€‚ๅฆๅค–ไฝ ๅบ”่ฏฅ่ฟ˜ๅœจๅŠ ๅˆ†ไน ้ข˜้‡Œ็ ”็ฉถ่ฟ‡ Python ๆ–‡ๆกฃ๏ผŒ\\\n็œ‹ไบ†ๅˆ—่กจๆ”ฏๆŒ็š„ๅ…ถไป–ๆ“ไฝœใ€‚่ฟ™ๅทฒ็ปๆ˜ฏไธ€ๆฎตๆ—ถ้—ดไปฅๅ‰ไบ†๏ผŒๆ‰€ไปฅๅฆ‚ๆžœไฝ ไธ่ฎฐๅพ—ไบ†็š„่ฏ๏ผŒๅฐฑๅ›ž\\\nๅˆฐๆœฌไนฆ็š„ๅ‰้ขๅ†ๅคไน ไธ€้ๆŠŠใ€‚\n\nๆ‰พๅˆฐไบ†ๅ—๏ผŸ่ฟ˜่ฎฐๅพ—ๅ—๏ผŸๅพˆๅฅฝใ€‚้‚ฃๆ—ถๅ€™ไฝ ๅฏนไธ€ไธชๅˆ—่กจๆ‰ง่กŒไบ† ``append`` ๅ‡ฝๆ•ฐใ€‚ไธ่ฟ‡๏ผŒไฝ \\\nไนŸ่ฎธ่ฟ˜ๆฒกๆœ‰็œŸๆญฃๆ˜Ž็™ฝๅ‘็”Ÿ็š„ไบ‹ๆƒ…๏ผŒๆ‰€ไปฅๆˆ‘ไปฌๅ†ๆฅ็œ‹็œ‹ๆˆ‘ไปฌๅฏไปฅๅฏนๅˆ—่กจ่ฟ›่กŒไป€ไนˆๆ ท็š„ๆ“ไฝœใ€‚\n\nๅฝ“ไฝ ็œ‹ๅˆฐๅƒ ``mystuff.append('hello')`` ่ฟ™ๆ ท็š„ไปฃ็ ๆ—ถ๏ผŒไฝ ไบ‹ๅฎžไธŠๅทฒ็ปๅœจ Python ๅ†…้ƒจ\\\nๆฟ€ๅ‘ไบ†ไธ€ไธช่ฟž้”ๅๅบ”ใ€‚ไปฅไธ‹ๆ˜ฏๅฎƒ็š„ๅทฅไฝœๅŽŸ็†๏ผš\n\n1. 
Python ็œ‹ๅˆฐไฝ ็”จๅˆฐไบ† ``mystuff`` ๏ผŒไบŽๆ˜ฏๅฐฑๅŽปๆ‰พๅˆฐ่ฟ™ไธชๅ˜้‡ใ€‚ไนŸ่ฎธๅฎƒ้œ€่ฆๅ€’็€ๆฃ€ๆŸฅ็œ‹\\\n ไฝ ๆœ‰ๆฒกๆœ‰ๅœจๅ“ช้‡Œ็”จ ``=`` ๅˆ›ๅปบ่ฟ‡่ฟ™ไธชๅ˜้‡๏ผŒๆˆ–่€…ๆฃ€ๆŸฅๅฎƒๆ˜ฏไธๆ˜ฏไธ€ไธชๅ‡ฝๆ•ฐๅ‚ๆ•ฐ๏ผŒๆˆ–่€…็œ‹\\\n ๅฎƒๆ˜ฏไธๆ˜ฏไธ€ไธชๅ…จๅฑ€ๅ˜้‡ใ€‚ไธ็ฎกๅ“ช็งๆ–นๅผ๏ผŒๅฎƒๅพ—ๅ…ˆๆ‰พๅˆฐ ``mystuff`` ่ฟ™ไธชๅ˜้‡ๆ‰่กŒใ€‚\n2. ไธ€ๆ—ฆๅฎƒๆ‰พๅˆฐไบ† ``mystuff`` ๏ผŒๅฐฑ่ฝฎๅˆฐๅค„็†ๅฅ็‚น ``.`` (period) ่ฟ™ไธชๆ“ไฝœ็ฌฆ๏ผŒ่€Œไธ”ๅผ€ๅง‹\\\n ๆŸฅ็œ‹ ``mystuff`` ๅ†…้ƒจ็š„ไธ€ไบ›ๅ˜้‡ไบ†ใ€‚็”ฑไบŽ ``mystuff`` ๆ˜ฏไธ€ไธชๅˆ—่กจ๏ผŒPython ็Ÿฅ้“\\\n ``mystuff`` ๆ”ฏๆŒไธ€ไบ›ๅ‡ฝๆ•ฐใ€‚\n3. ๆŽฅไธ‹ๆฅ่ฝฎๅˆฐไบ†ๅค„็† ``append`` ใ€‚Python ไผšๅฐ† \"append\" ๅ’Œ ``mystuff`` ๆ”ฏๆŒ็š„ๆ‰€ๆœ‰ๅ‡ฝๆ•ฐ\\\n ็š„ๅ็งฐไธ€ไธ€ๅฏนๆฏ”๏ผŒๅฆ‚ๆžœ็กฎๅฎžๅ…ถไธญๆœ‰ไธ€ไธชๅซ append ็š„ๅ‡ฝๆ•ฐ๏ผŒ้‚ฃไนˆ Python ๅฐฑไผšๅŽปไฝฟ็”จ่ฟ™ไธชๅ‡ฝๆ•ฐใ€‚\n4. ๆŽฅไธ‹ๆฅ Python ็œ‹ๅˆฐไบ†ๆ‹ฌๅท ``(`` (parenthesis) ๅนถไธ”ๆ„่ฏ†ๅˆฐ, โ€œๅ™ข๏ผŒๅŽŸๆฅ่ฟ™ๅบ”่ฏฅๆ˜ฏไธ€ไธช\\\n ๅ‡ฝๆ•ฐโ€๏ผŒๅˆฐไบ†่ฟ™้‡Œ๏ผŒๅฎƒๅฐฑๆญฃๅธธไผš่ฐƒ็”จ่ฟ™ไธชๅ‡ฝๆ•ฐไบ†๏ผŒไธ่ฟ‡่ฟ™้‡Œ็š„ๅ‡ฝๆ•ฐ่ฟ˜่ฆๅคšไธ€ไธชๅ‚ๆ•ฐๆ‰่กŒใ€‚\n5. ่ฟ™ไธช้ขๅค–็š„ๅ‚ๆ•ฐๅ…ถๅฎžๆ˜ฏโ€ฆโ€ฆ ``mystuff``! ๆˆ‘็Ÿฅ้“๏ผŒๅพˆๅฅ‡ๆ€ชๆ˜ฏไธๆ˜ฏ๏ผŸไธ่ฟ‡่ฟ™ๅฐฑๆ˜ฏ Python ็š„\\\n ๅทฅไฝœๅŽŸ็†๏ผŒๆ‰€ไปฅ่ฟ˜ๆ˜ฏ่ฎฐไฝ่ฟ™ไธ€็‚น๏ผŒๅฐฑๅฝ“ๅฎƒๆ˜ฏๆญฃๅธธ็š„ๅฅฝไบ†ใ€‚็œŸๆญฃๅ‘็”Ÿ็š„ไบ‹ๆƒ…ๅ…ถๅฎžๆ˜ฏ\n ``append(mystuff, 'hello')`` ๏ผŒไธ่ฟ‡ไฝ ็œ‹ๅˆฐ็š„ๅชๆ˜ฏ ``mystuff.append('hello')``\n ใ€‚\n\nๅคง้ƒจๅˆ†ๆ—ถๅ€™ไฝ ไธ้œ€่ฆ็Ÿฅ้“่ฟ™ไบ›็ป†่Š‚๏ผŒไธ่ฟ‡ๅฆ‚ๆžœไฝ ็œ‹ๅˆฐไธ€ไธชๅƒ่ฟ™ๆ ท็š„ Python ้”™่ฏฏไฟกๆฏ็š„ๆ—ถๅ€™๏ผŒ\\\nไธŠ้ข็š„็ป†่Š‚ๅฐฑๅฏนไฝ ๆœ‰็”จไบ†๏ผš\n\n.. code-block:: pycon\n\n $ python\n Python 2.6.5 (r265:79063, Apr 16 2010, 13:57:41) \n [GCC 4.4.3] on linux2\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> class Thing(object):\n ... def test(hi):\n ... print \"hi\"\n ... 
\n >>> a = Thing()\n >>> a.test(\"hello\")\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: test() takes exactly 1 argument (2 given)\n >>> \n\nๅฐฑๆ˜ฏ่ฟ™ไธชๅ—๏ผŸๅ—ฏ๏ผŒ่ฟ™ไธชๆ˜ฏๆˆ‘ๅœจ Python ๅ‘ฝไปค่กŒไธ‹ๅฑ•็คบ็ป™ไฝ ็š„ไธ€็‚น้ญ”ๆณ•ใ€‚ไฝ ่ฟ˜ๆฒกๆœ‰่ง่ฟ‡\\\n``class`` ไธ่ฟ‡ๅŽ้ขๅพˆๅฟซๅฐฑ่ฆ็ขฐๅˆฐไบ†ใ€‚็Žฐๅœจไฝ ็œ‹ๅˆฐ Python ่ฏด \n``test() takes exactly 1 argument (2 given)`` (test() ๅชๅฏไปฅๆŽฅๅ—1ไธชๅ‚ๆ•ฐ๏ผŒๅฎž้™…ไธŠ\\\n็ป™ไบ†ไธคไธช)ใ€‚ๅฎƒๆ„ๅ‘ณ็€ python ๆŠŠ ``a.test(\"hello\")`` ๆ”นๆˆไบ† ``test(a, \"hello\")``\n๏ผŒ่€Œๆœ‰ไบบๅผ„้”™ไบ†๏ผŒๆฒกๆœ‰ไธบๅฎƒๆทปๅŠ  ``a`` ่ฟ™ไธชๅ‚ๆ•ฐใ€‚\n\nไธ€ไธ‹ๅญ่ฆๆถˆๅŒ–่ฟ™ไนˆๅคšๅฏ่ƒฝๆœ‰็‚น้šพๅบฆ๏ผŒไธ่ฟ‡ๆˆ‘ไปฌๅฐ†ๅšๅ‡ ไธช็ปƒไน ๏ผŒ่ฎฉไฝ ๅคด่„‘ไธญๆœ‰ไธ€ไธชๆทฑๅˆป็š„ๅฐ่ฑกใ€‚\\\nไธ‹้ข็š„็ปƒไน ๅฐ†ๅญ—็ฌฆไธฒๅ’Œๅˆ—่กจๆททๅœจไธ€่ตท๏ผŒ็œ‹็œ‹ไฝ ่ƒฝไธ่ƒฝๅœจ้‡Œ่พนๆ‰พๅ‡บ็‚นไนๅญๆฅ๏ผš\n\n.. literalinclude:: ex/ex39.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex39.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅฐ†ๆฏไธ€ไธช่ขซ่ฐƒ็”จ็š„ๅ‡ฝๆ•ฐไปฅไธŠ่ฟฐ็š„ๆ–นๅผ็ฟป่ฏ‘ๆˆ Python ๅฎž้™…ๆ‰ง่กŒ็š„ๅŠจไฝœใ€‚ไพ‹ๅฆ‚๏ผš ``' '.join(things)`` \n ๅ…ถๅฎžๆ˜ฏ ``join(' ', things)`` ใ€‚\n2. ๅฐ†่ฟ™ไธค็งๆ–นๅผ็ฟป่ฏ‘ไธบ่‡ช็„ถ่ฏญ่จ€ใ€‚ไพ‹ๅฆ‚๏ผŒ ``' '.join(things)`` ๅฏไปฅ็ฟป่ฏ‘ๆˆโ€œ็”จ ' '\n ่ฟžๆŽฅ(join) thingsโ€๏ผŒ่€Œ ``join(' ', things)`` ็š„ๆ„ๆ€ๆ˜ฏโ€œไธบ ' ' ๅ’Œ things ่ฐƒ็”จ join\n ๅ‡ฝๆ•ฐโ€ใ€‚่ฟ™ๅ…ถๅฎžๆ˜ฏๅŒไธ€ไปถไบ‹ๆƒ…ใ€‚\n3. ไธŠ็ฝ‘้˜…่ฏปไธ€ไบ›ๅ…ณไบŽโ€œ้ขๅ‘ๅฏน่ฑก็ผ–็จ‹(Object Oriented Programming)โ€็š„่ต„ๆ–™ใ€‚ๆ™•ไบ†ๅง๏ผŸๅ—ฏ๏ผŒๆˆ‘ไปฅๅ‰ไนŸๆ˜ฏใ€‚\\\n ๅˆซๆ‹…ๅฟƒใ€‚ไฝ ๅฐ†ไปŽ่ฟ™ๆœฌไนฆๅญฆๅˆฐ่ถณๅคŸ็”จ็š„ๅ…ณไบŽ้ขๅ‘ๅฏน่ฑก็ผ–็จ‹็š„ๅŸบ็ก€็Ÿฅ่ฏ†๏ผŒ่€ŒไปฅๅŽไฝ ่ฟ˜ๅฏไปฅๆ…ขๆ…ขๅญฆๅˆฐๆ›ดๅคšใ€‚\n4. ๆŸฅไธ€ไธ‹ Pythonไธญ็š„ \"class\" ๆ˜ฏไป€ไนˆไธœ่ฅฟใ€‚ไธ่ฆ้˜…่ฏปๅ…ณไบŽๅ…ถไป–่ฏญ่จ€็š„ \"class\" ็š„็”จๆณ•๏ผŒ่ฟ™ไผš่ฎฉไฝ ๆ›ด็ณŠๆถ‚ใ€‚\n5. ``dir(something)`` ๅ’Œ ``something`` ็š„ class ๆœ‰ไป€ไนˆๅ…ณ็ณป๏ผŸ\n6. 
ๅฆ‚ๆžœไฝ ไธ็Ÿฅ้“ๆˆ‘่ฎฒ็š„ๆ˜ฏไบ›ไป€ไนˆไธœ่ฅฟ๏ผŒๅˆซๆ‹…ๅฟƒใ€‚็จ‹ๅบๅ‘˜ไธบไบ†ๆ˜พๅพ—่‡ชๅทฑ่ชๆ˜Ž๏ผŒไบŽๆ˜ฏๅฐฑๅ‘ๆ˜Žไบ† Opject Oriented\n Programming๏ผŒ็ฎ€็งฐไธบ OOP๏ผŒ็„ถๅŽไป–ไปฌๅฐฑๅผ€ๅง‹ๆปฅ็”จ่ฟ™ไธชไธœ่ฅฟไบ†ใ€‚ๅฆ‚ๆžœไฝ ่ง‰ๅพ—่ฟ™ไธœ่ฅฟๅคช้šพ๏ผŒ\\\n ไฝ ๅฏไปฅๅผ€ๅง‹ๅญฆไธ€ไธ‹ โ€œๅ‡ฝๆ•ฐ็ผ–็จ‹(functional programming)โ€ใ€‚\n\n\n" }, { "alpha_fraction": 0.5309734344482422, "alphanum_fraction": 0.5424779057502747, "avg_line_length": 25.23255729675293, "blob_id": "b2b95333a5cedb7445e2df102f3f6407c755f16e", "content_id": "7d40b6e20ce96fe705f99bf7fbe1b00a1260cfe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2078, "license_type": "no_license", "max_line_length": 55, "num_lines": 43, "path": "/cn/ex3.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 3: ๆ•ฐๅญ—ๅ’Œๆ•ฐๅญฆ่ฎก็ฎ—\n****************************\n\nๆฏไธ€็ง็ผ–็จ‹่ฏญ่จ€้ƒฝๅŒ…ๅซๅค„็†ๆ•ฐๅญ—ๅ’Œ่ฟ›่กŒๆ•ฐๅญฆ่ฎก็ฎ—็š„ๆ–นๆณ•ใ€‚ไธๅฟ…ๆ‹…ๅฟƒ๏ผŒ็จ‹ๅบๅ‘˜็ปๅธธๆ’’่ฐŽ่ฏดไป–ไปฌๆ˜ฏๅคšไนˆ็‰›็š„\\\nๆ•ฐๅญฆๅคฉๆ‰๏ผŒๅ…ถๅฎžไป–ไปฌๆ นๆœฌไธๆ˜ฏใ€‚ๅฆ‚ๆžœไป–ไปฌ็œŸๆ˜ฏๆ•ฐๅญฆๅคฉๆ‰๏ผŒไป–ไปฌๆ—ฉๅฐฑๅŽปไปŽไบ‹ๆ•ฐๅญฆ็›ธๅ…ณ็š„่กŒไธšไบ†๏ผŒ่€Œไธๆ˜ฏ\\\nๅ†™ๅ†™ๅนฟๅ‘Š็จ‹ๅบๅ’Œ็คพไบค็ฝ‘็ปœๆธธๆˆ๏ผŒไปŽไบบไปฌ่บซไธŠๅท่ตš็‚นๅฐ้’ฑ่€Œๅทฒใ€‚\n\n่ฟ™็ซ ็ปƒไน ้‡Œๆœ‰ๅพˆๅคš็š„ๆ•ฐๅญฆ่ฟ็ฎ—็ฌฆๅทใ€‚ๆˆ‘ไปฌๆฅ็œ‹ไธ€้ๅฎƒไปฌ้ƒฝๅซไป€ไนˆๅๅญ—ใ€‚ไฝ ่ฆไธ€่พนๅ†™ไธ€่พนๅฟตๅ‡บๅฎƒไปฌ็š„\\\nๅๅญ—ๆฅ๏ผŒ็›ดๅˆฐไฝ ๅฟต็ƒฆไบ†ไธบๆญขใ€‚ๅๅญ—ๅฆ‚ไธ‹๏ผš\n\n* ``+`` plus ๅŠ ๅท\n* ``-`` minus ๅ‡ๅท\n* ``/`` slash ๆ–œๆ \n* ``*`` asterisk ๆ˜Ÿๅท\n* ``%`` percent ็™พๅˆ†ๅท\n* ``<`` less-than ๅฐไบŽๅท\n* ``>`` greater-than ๅคงไบŽๅท\n* ``<=`` less-than-equal ๅฐไบŽ็ญ‰ไบŽๅท\n* ``>=`` greater-than-equal ๅคงไบŽ็ญ‰ไบŽๅท\n\nๆœ‰ๆฒกๆœ‰ๆณจๆ„ๅˆฐไปฅไธŠๅชๆ˜ฏไบ›็ฌฆๅท๏ผŒๆฒกๆœ‰่ฟ็ฎ—ๆ“ไฝœๅ‘ข๏ผŸๅ†™ๅฎŒไธ‹้ข็š„็ปƒไน ไปฃ็ ๅŽ๏ผŒๅ†ๅ›žๅˆฐไธŠ้ข็š„ๅˆ—่กจ๏ผŒๅ†™ๅ‡บๆฏ\\\nไธช็ฌฆๅท็š„ไฝœ็”จใ€‚ไพ‹ๅฆ‚ ``+`` ๆ˜ฏ็”จๆฅๅšๅŠ ๆณ•่ฟ็ฎ—็š„ใ€‚\n\n.. 
literalinclude:: ex/ex3.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex3.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ไฝฟ็”จ ``#`` ๅœจไปฃ็ ๆฏไธ€่กŒ็š„ๅ‰ไธ€่กŒไธบ่‡ชๅทฑๅ†™ไธ€ไธชๆณจ่งฃ๏ผŒ่ฏดๆ˜Žไธ€ไธ‹่ฟ™ไธ€่กŒ็š„ไฝœ็”จใ€‚\n2. ่ฎฐๅพ—ๅผ€ๅง‹ๆ—ถ็š„ <็ปƒไน  0> ๅง๏ผŸ็”จ้‡Œ่พน็š„ๆ–นๆณ•ๆŠŠ Python ่ฟ่กŒ่ตทๆฅ๏ผŒ็„ถๅŽไฝฟ็”จๅˆšๆ‰ๅญฆๅˆฐ็š„่ฟ็ฎ—็ฌฆๅท๏ผŒๆŠŠ\\\n Pythonๅฝ“ๅš่ฎก็ฎ—ๅ™จ็Žฉ็Žฉใ€‚\n3. ่‡ชๅทฑๆ‰พไธชๆƒณ่ฆ่ฎก็ฎ—็š„ไธœ่ฅฟ๏ผŒๅ†™ไธ€ไธช ``.py`` ๆ–‡ไปถๆŠŠๅฎƒ่ฎก็ฎ—ๅ‡บๆฅใ€‚\n4. ๆœ‰ๆฒกๆœ‰ๅ‘็Žฐ่ฎก็ฎ—็ป“ๆžœๆ˜ฏ\"้”™\"็š„ๅ‘ข๏ผŸ่ฎก็ฎ—็ป“ๆžœๅชๆœ‰ๆ•ดๆ•ฐ๏ผŒๆฒกๆœ‰ๅฐๆ•ฐ้ƒจๅˆ†ใ€‚็ ”็ฉถไธ€ไธ‹่ฟ™ๆ˜ฏไธบไป€ไนˆ๏ผŒ\\\n ๆœ็ดขไธ€ไธ‹โ€œๆตฎ็‚นๆ•ฐ(floating point number)โ€ๆ˜ฏไป€ไนˆไธœ่ฅฟใ€‚\n5. ไฝฟ็”จๆตฎ็‚นๆ•ฐ้‡ๅ†™ไธ€้ ``ex3.py``\\๏ผŒ่ฎฉๅฎƒ็š„่ฎก็ฎ—็ป“ๆžœๆ›ดๅ‡†็กฎ(ๆ็คบ: 20.0 ๆ˜ฏไธ€ไธชๆตฎ็‚นๆ•ฐ)ใ€‚\n\n\n" }, { "alpha_fraction": 0.7138180732727051, "alphanum_fraction": 0.7280804514884949, "avg_line_length": 45.4782600402832, "blob_id": "b60f8c11fec0d54e6e4129a6b20e146c5062e350", "content_id": "d9bf98c0938983efd1f80c318a43bc50419258aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4277, "license_type": "no_license", "max_line_length": 108, "num_lines": 92, "path": "/ex34.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 34: Accessing Elements Of Lists\n****************************************\n\nLists are pretty useful, but unless you can get at the things in them they\naren't all that good. You can already go through the elements of a list in\norder, but what if you want say, the 5th element? You need to know how to\naccess the elements of a list. Here's how you would access the *first* element\nof a list:\n\n.. code-block:: python\n\n animals = ['bear', 'tiger', 'penguin', 'zebra']\n bear = animals[0]\n\nYou take a list of animals, and then you get the first one using ``0``?! How\ndoes that work? 
Because of the way math works, Python start its\nlists at 0 rather than 1. It seems weird, but there's many advantages to this,\neven though it is mostly arbitrary.\n\nThe best way to explain why is by showing you the difference between how\nyou use numbers and how programmers use numbers.\n\nImagine you are watching the four animals in our list above\n(``['bear', 'tiger', 'penguin', 'zebra']``) run in a race. They win in the *order* we have\nthem in this list. The race was really exciting because, the animals\ndidn't eat each other and somehow managed to run a race. Your friend however\nshows up late and wants to know who won. Does your friend say, \n\"Hey, who came in *zeroth*?\" No, he says, \"Hey, who came in *first*?\"\n\nThis is because the *order* of the animals is important. You can't have the second\nanimal without the first animal, and can't have the third without the second. It's\nalso impossible to have a \"zeroth\" animal since zero means nothing. How can you have\na nothing win a race? It just doesn't make sense. We call these kinds of \nnumbers \"ordinal\" numbers, because they indicate an ordering of things.\n\nProgrammers, however, can't think this way because they can pick any element\nout of a list at any point. To a programmer, the above list is more like a deck\nof cards. If they want the tiger, they grab it. If they want the zebra, they\ncan take it too. This need to pull elements out of lists at random means that\nthey need a way to indicate elements consistently by an address, or an \"index\", and\nthe best way to do that is to start the indices at 0. Trust me on this, the math\nis *way* easier for these kinds of accesses. This kind of number is a\n\"cardinal\" number and means you can pick at random, so there needs to be a 0\nelement.\n\nSo, how does this help you work with lists? Simple, every time you say\nto yourself, \"I want the 3rd animal,\" you translate this \"ordinal\" number to\na \"cardinal\" number by subtracting 1. 
The \"3rd\" animal is at index 2 and is the\npenguin. You have to do this because you have spent your whole life using ordinal\nnumbers, and now you have to think in cardinal. Just subtract 1 and you will\nbe good.\n\nRemember: ordinal == ordered, 1st; cardinal == cards at random, 0.\n\nLet's practice this. Take this list of animals, and follow the exercises\nwhere I tell you to write down what animal you get for that ordinal or cardinal\nnumber. Remember if I say \"first\", \"second\", etc. then I'm using ordinal, so\nsubtract 1. If I give you cardinal (0, 1, 2) then use it directly.\n\n.. code-block:: python\n\n animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']\n\n1. The animal at 1.\n2. The 3rd animal.\n3. The 1st animal.\n4. The animal at 3.\n5. The 5th animal.\n6. The animal at 2.\n7. The 6th animal.\n8. The animal at 4.\n\nFor each of these, write out a full sentence of the form: \"The 1st animal is at 0 and is a bear.\"\nThen say it backwards, \"The animal at 0 is the 1st animal and is a bear.\"\n\nUse your python to check your answers.\n\n\nExtra Credit\n============\n\n1. Read about ordinal and cardinal numbers online.\n2. With what you know of the difference between these types of numbers, can you explain why the year 2010 in\n \"January 1, 2010\" really is 2010 and not 2009? (Hint, you can't pick years at random.)\n3. Write some more lists and work out similar indexes until you can translate them.\n4. Use Python to check your answers to this as well.\n\n.. 
warning::\n\n Programmers will tell you to read this guy named \"Dijkstra\" on this subject.\n I recommend you avoid his writings on this unless you enjoy being yelled at\n by someone who stopped programming at the same time programming started.\n\n" }, { "alpha_fraction": 0.6578108668327332, "alphanum_fraction": 0.6726886034011841, "avg_line_length": 27.484848022460938, "blob_id": "a011bfb6c74efcc99f2a94dc02f96f0a17553b08", "content_id": "1b908705c4f181f78cf5550ae12c58ccaf8b8f36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 941, "license_type": "no_license", "max_line_length": 81, "num_lines": 33, "path": "/ex29.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 29: What If\n********************\n\nHere is the next script of Python you will enter, which introduces you to\nthe ``if-statement``. Type this in, make it run exactly right, and then \nwe'll try see if your practice has paid off.\n\n.. literalinclude:: ex/ex29.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex29.txt\n :language: console\n\n\n\nExtra Credit\n============\n\nIn this extra credit, try to guess what you think the ``if-statement`` is and\nwhat it does. Try to answer these questions in your own words before moving\nonto the next exercise:\n\n1. What do you think the ``if`` does to the code under it?\n2. Why does the code under the ``if`` need to be indented 4 spaces?\n3. What happens if it isn't indented?\n4. Can you put other boolean expressions from Ex. 27 in the ``if-statement``? \n Try it.\n5. 
What happens if you change the initial variables for ``people``, ``cats``, and\n ``dogs``?\n\n" }, { "alpha_fraction": 0.7541739344596863, "alphanum_fraction": 0.7559182643890381, "avg_line_length": 46.49112319946289, "blob_id": "101eedf82b8d46aaf2dad3a768e33c7ad3faa7d0", "content_id": "a2f8bdfdc290c36e776570aa7544f076e8568156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 8026, "license_type": "no_license", "max_line_length": 87, "num_lines": 169, "path": "/intro.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "The Hard Way Is Easier\n**********************\n\nThis simple book is meant to get you started in programming. The title says\nit's the hard way to learn to write code; but it's actually not. It's only the\n\"hard\" way because it's the way people *used* to teach things. With the help\nof this book, you will do the incredibly simple things that all programmers need\nto do to learn a language:\n\n1. Go through each exercise.\n2. Type in each sample *exactly*.\n3. Make it run.\n\nThat's it. This will be *very* difficult at first, but stick with it. If you\ngo through this book, and do each exercise for one or two hours a night, you will\nhave a good foundation for moving onto another book. You might not really\nlearn \"programming\" from this book, but you will learn the foundation skills you\nneed to start learning the language.\n\nThis book's job is to teach you the three most essential skills that a\nbeginning programmer needs to know: Reading and Writing, Attention to Detail,\nSpotting Differences.\n\n\nReading and Writing\n===================\n\nIt seems stupidly obvious, but, if you have a problem typing, you will have a\nproblem learning to code. Especially if you have a problem typing the fairly\nodd characters in source code. 
Without this simple skill you will be unable to\nlearn even the most basic things about how software works.\n\nTyping the code samples and getting them to run will help you learn the names of\nthe symbols, get familiar with typing them, and get you reading the language.\n\nAttention to Detail\n===================\n\nThe one skill that separates bad programmers from good programmers is attention\nto detail. In fact, it's what separates the good from the bad in any profession.\nWithout paying attention to the tiniest details of your work, you will miss key\nelements of what you create. In programming, this is how you end up\nwith bugs and difficult-to-use systems.\n\nBy going through this book, and copying each example *exactly*, you will be\ntraining your brain to focus on the details of what you are doing, as you are doing it.\n\n\nSpotting Differences\n====================\n\nA very important skill -- that most programmers develop over time -- is the ability to\nvisually notice differences between things. An experienced programmer can take\ntwo pieces of code that are slightly different and immediately start pointing\nout the differences. Programmers have invented tools to make this even\neasier, but we won't be using any of these. You first have to train your\nbrain the hard way, then you can use the tools.\n\nWhile you do these exercises, typing each one in, you will be making mistakes.\nIt's inevitable; even seasoned programmers would make a few. Your\njob is to compare what you have written to what's required, and fix all the\ndifferences. By doing so, you will train yourself to notice mistakes,\nbugs, and other problems.\n\n\nDo Not Copy-Paste\n=================\n\nYou must *type* each of these exercises in, manually. If you copy and paste,\nyou might as well just not even do them. The point of these exercises is to\ntrain your hands, your brain, and your mind in how to read, write, and see\ncode. 
If you copy-paste, you are cheating yourself out of the effectiveness of\nthe lessons.\n\n\nA Note On Practice And Persistence\n==================================\n\nWhile you are studying programming, I'm studying how to play guitar. I\npractice it every day for at least 2 hours a day. I play scales, chords, and\narpeggios for an hour at least and then learn music theory, ear training, songs\nand anything else I can. Some days I study guitar and music for 8 hours because I\nfeel like it and it's fun. To me repetitive practice is natural and just how\nto learn something. I know that to get good at anything you have to practice\nevery day, even if I suck that day (which is often) or it's difficult. Keep\ntrying and eventually it'll be easier and fun.\n\nAs you study this book, and continue with programming, remember that anything\nworth doing is difficult at first. Maybe you are the kind of person who is\nafraid of failure so you give up at the first sign of difficulty.\nMaybe you never learned self-discipline so you can't do anything that's\n\"boring\". Maybe you were told that you are \"gifted\" so you never attempt\nanything that might make you seem stupid or not a prodigy. Maybe you are\ncompetitive and unfairly compare yourself to someone like me who's been\nprogramming for 20+ years.\n\nWhatever your reason for wanting to quit, *keep at it*. Force yourself. If\nyou run into an Extra Credit you can't do, or a lesson you just do not understand, then\nskip it and come back to it later. Just keep going because with programming\nthere's this very odd thing that happens.\n\nAt first, you will not understand anything. It'll be weird, just like with\nlearning any human language. You will struggle with words, and not know what\nsymbols are what, and it'll all be very confusing. Then one day *BANG* your\nbrain will snap and you will suddenly \"get it\". If you keep doing the exercises\nand keep trying to understand them, you will get it. 
You might not be a master\ncoder, but you will at least understand how programming works.\n\nIf you give up, you won't ever reach this point. You will hit the first\nconfusing thing (which is everything at first) and then stop. If you keep\ntrying, keep typing it in, trying to understand it and reading about it, \nyou will eventually get it.\n\nBut, if you go through this whole book, and you still do not understand how to\ncode, at least you gave it a shot. You can say you tried your best and a\nlittle more and it didn't work out, but at least you tried. You can be proud\nof that.\n\n\nA Warning For The Smarties\n==========================\n\nSometimes people who already know a programming language will read this book\nand feel I'm insulting them. There is nothing in this book that is intended to\nbe interpreted as condescending, insulting, or belittling. I simply know more\nabout programming than my *intended* readers. If you think you are smarter\nthan me then you will feel talked down to and there's nothing I can do about\nthat because you are not my *intended* reader.\n\nIf you are reading this book and flipping out at every third sentence\nbecause you feel I'm insulting your intelligence, then I have three points of\nadvice for you:\n\n1. Stop reading my book. I didn't write it for you. I wrote it for people\n who don't already know everything.\n2. Empty before you fill. You will have a hard time learning from someone\n with more knowledge if you already know everything.\n3. Go learn Lisp. I hear people who know everything really like Lisp.\n\nFor everyone else who's here to learn, just read everything as if I'm smiling\nand I have a mischievous little twinkle in my eye.\n\n\nLicense\n=======\n\nThis book is Copyright (C) 2010 by Zed A. Shaw. You are free to distribute\nthis book to anyone you want, so long as you do *not* charge anything for it,\n*and* it is not altered. You must give away the book in its entirety, or not at\nall. 
This means it's alright for you to teach a class using the book, so long\nas you aren't charging students for the *book* and you give them the whole book\nunmodified.\n\n\nSpecial Thanks\n==============\n\nI'd like to thank a few people who helped with this edition of the book. First\nis my editor at *Pretty Girl Editing Services* who helped me edit the book and is\njust lovely all by herself. Then there's *Greg Newman*, who did the cover jacket\nand artwork, plus reviewed copies of the book. His artwork made the book look\nlike a real book, and didn't mind that I totally forgot to give him credit in\nthe first edition. I'd also like to thank *Brian Shumate* for doing the website\nlanding page and other site design help, which I need a lot of help on.\n\nFinally, I'd like to thank the hundreds of thousands of people who read the first\nedition and especially the ones who submitted bug reports and comments to improve\nthe book. It really made this edition solid and I couldn't have done it without \nall of you. Thank you.\n" }, { "alpha_fraction": 0.652910053730011, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 30.5, "blob_id": "4d76aea3f59011d3575086f1c67c1947e42c2562", "content_id": "c6d158bd2ead01f52f2af62fea32ebc68ef9e986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 945, "license_type": "no_license", "max_line_length": 83, "num_lines": 30, "path": "/ex35.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 35: Branches and Functions\n***********************************\n\nYou have learned to do ``if-statements``, functions, and lists. Now it's time\nto bend your mind. Type this in, and see if you can figure out what it's\ndoing.\n\n.. literalinclude:: ex/ex35.py\n :linenos:\n\n\nWhat You Should See\n===================\n\nHere's me playing the game:\n\n.. literalinclude:: ex/ex35.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. 
Draw a map of the game and how you flow through it.\n2. Fix all of your mistakes, including spelling mistakes.\n3. Write comments for the functions you do not understand. Remember doc comments?\n4. Add more to the game. What can you do to both simplify and expand it.\n5. The ``gold_room`` has a weird way of getting you to type a number. What are all\n the bugs in this way of doing it? Can you make it better than just checking if\n \"1\" or \"0\" are in the number? Look at how ``int()`` works for clues.\n" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6391128897666931, "avg_line_length": 17.33333396911621, "blob_id": "a053389712ecc23d8cd5d0016b214d82eb319cd0", "content_id": "921ced04cc62a7e4cdfdea4a20cd02f13f84b00f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1008, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/cn/ex35.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 35: ๅˆ†ๆ”ฏๅ’Œๅ‡ฝๆ•ฐ\n********************************\n\nไฝ ๅทฒ็ปๅญฆไผšไบ† ``if ่ฏญๅฅ``\\ใ€ๅ‡ฝๆ•ฐใ€่ฟ˜ๆœ‰ๆ•ฐ็ป„(array)ใ€‚็Žฐๅœจไฝ ่ฆ็ปƒไน ๆ‰ญ่ฝฌไธ€ไธ‹ๆ€็ปดไบ†ใ€‚ๆŠŠไธ‹้ข็š„ไปฃ็ \\\nๅ†™ไธ‹ๆฅ๏ผŒ็œ‹ไฝ ๆ˜ฏๅฆ่ƒฝๅผ„ๆ‡‚ๅฎƒๅฎž็Žฐ็š„ๆ˜ฏไป€ไนˆๅŠŸ่ƒฝใ€‚\n\n.. literalinclude:: ex/ex35.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nไฝ ๅฏไปฅ็œ‹ๅˆฐ๏ผŒๆˆ‘ๆ‹ฟๅˆฐๅคชๅคš้‡‘ๅญไบ†๏ผš\n\n.. literalinclude:: ex/ex35.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๆŠŠ่ฟ™ไธชๆธธๆˆ็š„ๅœฐๅ›พ็”ปๅ‡บๆฅ๏ผŒๆŠŠ่‡ชๅทฑ็š„่ทฏ็บฟไนŸ็”ปๅ‡บๆฅใ€‚\n2. ๆ”นๆญฃไฝ ๆ‰€ๆœ‰็š„้”™่ฏฏ๏ผŒๅŒ…ๆ‹ฌๆ‹ผๅ†™้”™่ฏฏใ€‚\n3. ไธบไฝ ไธๆ‡‚็š„ๅ‡ฝๆ•ฐๅ†™ๆณจ่งฃใ€‚่ฎฐๅพ—ๆ–‡ๆกฃๆณจ่งฃ่ฏฅๆ€Žไนˆๅ†™ๅ—๏ผŸ\n4. ไธบๆธธๆˆๆทปๅŠ ๆ›ดๅคšๅ…ƒ็ด ใ€‚้€š่ฟ‡ๆ€Žๆ ท็š„ๆ–นๅผๅฏไปฅ็ฎ€ๅŒ–ๅนถไธ”ๆ‰ฉๅฑ•ๆธธๆˆ็š„ๅŠŸ่ƒฝๅ‘ข๏ผŸ\n5. 
่ฟ™ไธช ``gold_room`` ๆธธๆˆไฝฟ็”จไบ†ๅฅ‡ๆ€ช็š„ๆ–นๅผ่ฎฉไฝ ้”ฎๅ…ฅไธ€ไธชๆ•ฐๅญ—ใ€‚่ฟ™็งๆ–นๅผไผšๅฏผ่‡ดไป€ไนˆๆ ท็š„ bug๏ผŸ\n ไฝ ๅฏไปฅ็”จๆฏ”ๆฃ€ๆŸฅ 0ใ€1 ๆ›ดๅฅฝ็š„ๆ–นๅผๅˆคๆ–ญ่พ“ๅ…ฅๆ˜ฏๅฆๆ˜ฏๆ•ฐๅญ—ๅ—๏ผŸ\\ ``int()`` ่ฟ™ไธชๅ‡ฝๆ•ฐๅฏไปฅ็ป™ไฝ ไธ€ไบ›ๅคด็ปชใ€‚ \n" }, { "alpha_fraction": 0.7113352417945862, "alphanum_fraction": 0.7146974205970764, "avg_line_length": 40.619998931884766, "blob_id": "441878e2ea4f12a715b7470d6f2b71cad3dd82f7", "content_id": "fee1ee99ff4023000c8302b72155ee9138009a4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 87, "num_lines": 50, "path": "/ex6.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 6: Strings And Text\n****************************\n\nWhile you have already been writing strings, you still do not know what they do.\nIn this exercise we create a bunch of variables with complex strings so you can\nsee what they are for. First an explanation of strings.\n\nA string is usually a bit of text you want to display to someone, or \"export\"\nout of the program you are writing. Python knows you want something to be a\nstring when you put either ``\"`` (double-quotes) or ``'`` (single-quotes)\naround the text. You saw this many times with your use of ``print`` when you\nput the text you want to go to the string inside ``\"`` or ``'`` after the\n``print``. Then Python prints it.\n\nStrings may contain the format characters you have discovered so far. You\nsimply put the formatted variables in the string, and then a ``%`` (percent)\ncharacter, followed by the variable. The *only* catch is that if you want\nmultiple formats in your string to print multiple variables, you need to\nput them inside ``( )`` (parenthesis) separated by ``,`` (commas). 
It's as if\nyou were telling me to buy you a list of items from the store and you said, \"I\nwant milk, eggs, bread, and soup.\" Only as a programmer we say, \"(milk,\neggs, bread, soup)\".\n\nWe will now type in a whole bunch of strings, variables, formats, and print\nthem. You will also practice using short abbreviated variable names.\nProgrammers love saving themselves time at your expense by using annoying\ncryptic variable names, so let's get you started being able to read and write\nthem early on.\n\n\n.. literalinclude:: ex/ex6.py\n :linenos:\n\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex6.txt\n :language: console\n :linenos:\n\n\nExtra Credit\n============\n\n1. Go through this program and write a comment above each line explaining it.\n2. Find all the places where a string is put inside a string. There are four places.\n3. Are you sure there's only four places? How do you know? Maybe I like lying.\n4. Explain why adding the two strings ``w`` and ``e`` with ``+`` makes a longer string.\n\n" }, { "alpha_fraction": 0.6879432797431946, "alphanum_fraction": 0.6954526305198669, "avg_line_length": 39.62711715698242, "blob_id": "951afd6403cbbb6cfcbbaad818cda25c4ee23d9a", "content_id": "bc814397179e1d3b8bc56b3bba58fa3b779285bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 93, "num_lines": 59, "path": "/ex16.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 16: Reading And Writing Files\n**************************************\n\nIf you did the extra credit from the last exercise you should have seen all \nsorts of commands (methods/functions) you can give to files. Here's the\nlist of commands I want you to remember:\n\n* close -- Closes the file. 
Like ``File->Save..`` in your editor.\n* read -- Reads the contents of the file, you can assign the result to a variable.\n* readline -- Reads just one line of a text file.\n* truncate -- Empties the file, watch out if you care about the file.\n* write(stuff) -- Writes stuff to the file.\n\nFor now these are the important commands you need to know. Some of them\ntake parameters, but we do not really care about that. You only need to\nremember that ``write`` takes a parameter of a string you want to write\nto the file.\n\nLet's use some of this to make a simple little text editor:\n\n\n.. literalinclude:: ex/ex16.py\n :linenos:\n\nThat's a large file, probably the largest you have typed in. So go slow, do\nyour checks, and make it run. One trick is to get bits of it running at a\ntime. Get lines 1-8 running, then 5 more, then a few more, etc., until it's\nall done and running.\n\n\nWhat You Should See\n===================\n\nThere are actually two things you will see, first the output of your new\nscript:\n\n.. literalinclude:: ex/ex16.txt\n :language: console\n\nNow, open up the file you made (in my case ``test.txt``) in your\neditor and check it out. Neat right?\n\n\nExtra Credit\n============\n\n1. If you feel you do not understand this, go back through and use the comment\n trick to get it squared away in your mind. One simple English comment\n above each line will help you understand, or at least let you know what\n you need to research more.\n2. Write a script similar to the last exercise that uses ``read`` and ``argv`` \n to read the file you just created.\n3. There's too much repetition in this file. Use strings, formats, and escapes\n to print out ``line1``, ``line2``, and ``line3`` with just one ``target.write()``\n command instead of 6.\n4. Find out why we had to pass a ``'w'`` as an extra parameter to ``open``. Hint:\n ``open`` tries to be safe by making you explicitly say you want to write a file.\n5. 
If you open the file with ``'w'`` mode, then do you really need the ``target.truncate()``?\n Go read the docs for Python's ``open`` function and see if that's true.\n" }, { "alpha_fraction": 0.7939464449882507, "alphanum_fraction": 0.805587887763977, "avg_line_length": 24.235294342041016, "blob_id": "9c83b54440c6d59b35702319bf9caca0026f889a", "content_id": "ae275526391cf56b8f063fef87ff11ec79269622", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2223, "license_type": "no_license", "max_line_length": 55, "num_lines": 34, "path": "/cn/ex26.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 26: ๆญๅ–œไฝ ๏ผŒ็Žฐๅœจๅฏไปฅ่€ƒ่ฏ•ไบ†๏ผ\n******************************************\n\nไฝ ๅทฒ็ปๅทฎไธๅคšๅฎŒๆˆ่ฟ™ๆœฌไนฆ็š„ๅ‰ๅŠ้ƒจๅˆ†ไบ†๏ผŒไธ่ฟ‡ๅŽๅŠ้ƒจๅˆ†ๆ‰ๆ˜ฏๆ›ดๆœ‰่ถฃ็š„ใ€‚ไฝ ๅฐ†ๅญฆๅˆฐ้€ป่พ‘๏ผŒๅนถ้€š่ฟ‡\\\nๆกไปถๅˆคๆ–ญๅฎž็Žฐๆœ‰็”จ็š„ๅŠŸ่ƒฝใ€‚\n\nๅœจไฝ ็ปง็ปญๅญฆไน ไน‹ๅ‰๏ผŒไฝ ๆœ‰ไธ€้“่ฏ•้ข˜่ฆๅšใ€‚่ฟ™้“่ฏ•้ข˜ๅพˆ้šพ๏ผŒๅ› ไธบๅฎƒ้œ€่ฆไฝ ไฟฎๆญฃๅˆซไบบๅ†™็š„ไปฃ็ ใ€‚ๅฝ“\\\nไฝ ๆˆไธบ็จ‹ๅบๅ‘˜ไปฅๅŽ๏ผŒไฝ ๅฐ†้œ€่ฆ็ปๅธธ้ขๅฏนๅˆซ็š„็จ‹ๅบๅ‘˜็š„ไปฃ็ ๏ผŒไนŸ่ฎธ่ฟ˜ๆœ‰ไป–ไปฌ็š„ๅ‚ฒๆ…ขๆ€ๅบฆ๏ผŒไป–ไปฌ\\\nไผš็ปๅธธ่ฏด่‡ชๅทฑ็š„ไปฃ็ ๆ˜ฏๅฎŒ็พŽ็š„ใ€‚\n\n่ฟ™ๆ ท็š„็จ‹ๅบๅ‘˜ๆ˜ฏ่‡ชไปฅไธบๆ˜ฏไธๅœจไนŽๅˆซไบบ็š„่ ข่ดงใ€‚ไผ˜็ง€็š„็ง‘ๅญฆๅฎถไผšๅฏนไป–ไปฌ่‡ชๅทฑ็š„ๅทฅไฝœๆŒๆ€€็–‘ๆ€ๅบฆ๏ผŒ\\\nๅŒๆ ท๏ผŒไผ˜็ง€็š„็จ‹ๅบๅ‘˜ไนŸไผš่ฎคไธบ่‡ชๅทฑ็š„ไปฃ็ ๆ€ปๆœ‰ๅ‡บ้”™็š„ๅฏ่ƒฝ๏ผŒไป–ไปฌไผšๅ…ˆๅ‡่ฎพๆ˜ฏ่‡ชๅทฑ็š„ไปฃ็ ๆœ‰้—ฎ้ข˜๏ผŒ\\\n็„ถๅŽ็”จๆŽ’้™คๆณ•ๆธ…ๆŸฅๆ‰€ๆœ‰ๅฏ่ƒฝๆ˜ฏ่‡ชๅทฑๆœ‰้—ฎ้ข˜็š„ๅœฐๆ–น๏ผŒๆœ€ๅŽๆ‰ไผšๅพ—ๅ‡บโ€œ่ฟ™ๆ˜ฏๅˆซไบบ็š„้”™่ฏฏโ€่ฟ™ๆ ท็š„็ป“่ฎบใ€‚\n\nๅœจ่ฟ™่Š‚็ปƒไน ไธญ๏ผŒไฝ ๅฐ†้ขๅฏนไธ€ไธชๆฐดๅนณ็ณŸ็ณ•็š„็จ‹ๅบๅ‘˜๏ผŒๅนถๆ”นๅฅฝไป–็š„ไปฃ็ ใ€‚ๆˆ‘ๅฐ†ไน ้ข˜ 24 ๅ’Œ 25 ่ƒกไนฑ\\\nๆ‹ท่ดๅˆฐไบ†ไธ€ไธชๆ–‡ไปถไธญ๏ผŒ้šๆœบๅœฐๅˆ ๆމไบ†ไธ€ไบ›ๅญ—็ฌฆ๏ผŒ็„ถๅŽๆทปๅŠ ไบ†ไธ€ไบ›้”™่ฏฏ่ฟ›ๅŽปใ€‚ๅคง้ƒจๅˆ†็š„้”™่ฏฏๆ˜ฏ\\\nPython ๅœจๆ‰ง่กŒๆ—ถไผšๅ‘Š่ฏ‰ไฝ ็š„๏ผŒ่ฟ˜ๆœ‰ไธ€ไบ›็ฎ—ๆœฏ้”™่ฏฏๆ˜ฏไฝ ่ฆ่‡ชๅทฑๆ‰พๅ‡บๆฅ็š„ใ€‚ๅ†ๅ‰ฉไธ‹ๆฅ็š„ๅฐฑๆ˜ฏๆ 
ผๅผ\\\nๅ’Œๆ‹ผๅ†™้”™่ฏฏไบ†ใ€‚\n\nๆ‰€ๆœ‰่ฟ™ไบ›้”™่ฏฏ้ƒฝๆ˜ฏ็จ‹ๅบๅ‘˜ๅพˆๅฎนๆ˜“็Šฏ็š„๏ผŒๅฐฑ็ฎ—ๆœ‰็ป้ชŒ็š„็จ‹ๅบๅ‘˜ไนŸไธไพ‹ๅค–ใ€‚\n\nไฝ ็š„ไปปๅŠกๆ˜ฏๅฐ†ๆญคๆ–‡ไปถไฟฎๆ”นๆญฃ็กฎ๏ผŒ็”จไฝ ๆ‰€ๆœ‰็š„ๆŠ€่ƒฝๆ”น่ฟ›่ฟ™ไธช่„šๆœฌใ€‚ไฝ ๅฏไปฅๅ…ˆๅˆ†ๆž่ฟ™ไธชๆ–‡ไปถ๏ผŒ\\\nๆˆ–่€…ไฝ ่ฟ˜ๅฏไปฅๆŠŠๅฎƒๅƒๅญฆๆœŸ่ฎบๆ–‡ไธ€ๆ ทๆ‰“ๅฐๅ‡บๆฅ๏ผŒไฟฎๆญฃ้‡Œ่พน็š„ๆฏไธ€ไธช็ผบ้™ท๏ผŒ้‡ๅคไฟฎๆญฃๅ’Œ่ฟ่กŒ็š„\\\nๅŠจไฝœ๏ผŒ็›ดๅˆฐ่ฟ™ไธช่„šๆœฌๅฏไปฅๅฎŒ็พŽๅœฐ่ฟ่กŒ่ตทๆฅใ€‚ๅœจๆ•ดไธช่ฟ‡็จ‹ไธญไธ่ฆๅฏปๆฑ‚ๅธฎๅŠฉ๏ผŒๅฆ‚ๆžœไฝ ๅกๅœจๆŸไธชๅœฐๆ–น\\\nๆ— ๆณ•่ฟ›่กŒไธ‹ๅŽป๏ผŒ้‚ฃๅฐฑไผ‘ๆฏไธ€ไผšๆ™š็‚นๅ†ๅšใ€‚\n\nๅฐฑ็ฎ—ไฝ ้œ€่ฆๅ‡ ๅคฉๆ‰่ƒฝๅฎŒๆˆ๏ผŒไนŸไธ่ฆๆ”พๅผƒ๏ผŒ็›ดๅˆฐๅฎŒๅ…จๆ”นๅฏนไธบๆญขใ€‚\n\nๆœ€ๅŽ่ฆ่ฏด็š„ๆ˜ฏ๏ผŒ่ฟ™ไธช็ปƒไน ็š„็›ฎ็š„ไธๆ˜ฏๅ†™็จ‹ๅบ๏ผŒ่€Œๆ˜ฏไฟฎๆญฃ็Žฐๆœ‰็š„็จ‹ๅบ๏ผŒไฝ ้œ€่ฆ่ฎฟ้—ฎไธ‹้ข็š„็ฝ‘็ซ™๏ผš\n\n* http://learnpythonthehardway.com/wiki?name=Exercise26\n\nไปŽ้‚ฃ้‡ŒๆŠŠไปฃ็ ๅคๅˆถ็ฒ˜่ดด่ฟ‡ๆฅ๏ผŒๅ‘ฝๅไธบ ``ex26.py``\\๏ผŒ่ฟ™ไนŸๆ˜ฏๆœฌไนฆๅ”ฏไธ€ไธ€ๅค„ๅ…่ฎธไฝ ๅคๅˆถ็ฒ˜่ดด\\\n็š„ๅœฐๆ–นใ€‚\n\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7318627238273621, "avg_line_length": 25.8157901763916, "blob_id": "db1fdf19c688a3e2d77aec07d5dea8024c2b14de", "content_id": "f2b254d97684aad4727b8c96250311dff3008343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4320, "license_type": "no_license", "max_line_length": 76, "num_lines": 76, "path": "/cn/ex34.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 34: ่ฎฟ้—ฎๅˆ—่กจ็š„ๅ…ƒ็ด \n****************************************\n\nๅˆ—่กจ็š„็”จๅค„ๅพˆๅคง๏ผŒไฝ†ๅชๆœ‰ไฝ ่ƒฝ่ฎฟ้—ฎ้‡Œ่พน็š„ๅ†…ๅฎนๆ—ถๅฎƒๆ‰่ƒฝๅ‘ๆŒฅๅ‡บไฝœ็”จๆฅใ€‚ไฝ ๅทฒ็ปๅญฆไผšไบ†\\\nๆŒ‰้กบๅบ่ฏปๅ‡บๅˆ—่กจ็š„ๅ†…ๅฎน๏ผŒไฝ†ๅฆ‚ๆžœไฝ ่ฆๅพ—ๅˆฐ็ฌฌ 5 ไธชๅ…ƒ็ด ่ฏฅๆ€ŽไนˆๅŠžๅ‘ข๏ผŸไฝ ้œ€่ฆ็Ÿฅ้“ๅฆ‚ไฝ•\\\n่ฎฟ้—ฎๅˆ—่กจไธญ็š„ๅ…ƒ็ด ใ€‚่ฎฟ้—ฎ็ฌฌไธ€ไธชๅ…ƒ็ด ็š„ๆ–นๆณ•ๆ˜ฏ่ฟ™ๆ ท็š„๏ผš\n\n.. 
code-block:: python\n\n animals = ['bear', 'tiger', 'penguin', 'zebra']\n bear = animals[0]\n\nไฝ ๅฎšไน‰ไธ€ไธช animals ็š„ๅˆ—่กจ๏ผŒ็„ถๅŽไฝ ็”จ ``0`` ๆฅ่Žทๅ–็ฌฌไธ€ไธชๅ…ƒ็ด ?! ่ฟ™ๆ˜ฏๆ€Žไนˆๅ›žไบ‹ๅ•Š๏ผŸ\\\nๅ› ไธบๆ•ฐๅญฆ้‡Œ่พนๅฐฑๆ˜ฏ่ฟ™ๆ ท๏ผŒๆ‰€ไปฅ Python ็š„ๅˆ—่กจไนŸๆ˜ฏไปŽ 0 ๅผ€ๅง‹็š„ใ€‚่™ฝ็„ถ็œ‹ไธŠๅŽปๅพˆๅฅ‡ๆ€ช๏ผŒ\\\n่ฟ™ๆ ทๅฎšไน‰ๅ…ถๅฎžๆœ‰ๅฎƒ็š„ๅฅฝๅค„๏ผŒ่€Œไธ”ๅฎž้™…ไธŠ่ฎพ่ฎกๆˆ 0 ๆˆ–่€… 1 ๅผ€ๅคดๅ…ถๅฎž้ƒฝๅฏไปฅ๏ผŒ\n\nๆœ€ๅฅฝ็š„่งฃ้‡Šๆ–นๅผๆ˜ฏๅฐ†ไฝ ๅนณๆ—ถไฝฟ็”จๆ•ฐๅญ—็š„ๆ–นๅผๅ’Œ็จ‹ๅบๅ‘˜ไฝฟ็”จๆ•ฐๅญ—็š„ๆ–นๅผๅšๅฏนๆฏ”ใ€‚\n\nๅ‡่ฎพไฝ ๅœจ่ง‚็œ‹ไธŠ้ขๅˆ—่กจไธญ็š„ๅ››็งๅŠจ็‰ฉ(``['bear', 'tiger', 'penguin', 'zeebra']``) \n็š„่ต›่ท‘๏ผŒ่€Œๅฎƒไปฌๆฏ”่ต›็š„ๅ่ฏๆญฃๅฅฝ่ทŸๅˆ—่กจ้‡Œ็š„ๆฌกๅบไธ€ๆ ทใ€‚่ฟ™ๆ˜ฏไธ€ๅœบๅพˆๆฟ€ๅŠจไบบๅฟƒ็š„ๆฏ”่ต›๏ผŒ\\\nๅ› ไธบ่ฟ™ไบ›ๅŠจ็‰ฉๆฒกๆ‰“็ฎ—ๅƒๆމๅฏนๆ–น๏ผŒ่€Œไธ”ๆฏ”่ต›่ฟ˜็œŸ็š„ไธพๅŠž่ตทๆฅไบ†ใ€‚็ป“ๆžœไฝ ็š„ๆœ‹ๅ‹ๆฅๆ™šไบ†๏ผŒ\\\nไป–ๆƒณ็Ÿฅ้“่ฐ่ตขไบ†ๆฏ”่ต›๏ผŒไป–ไผš้—ฎไฝ โ€œๅ˜ฟ๏ผŒ่ฐๆ˜ฏ็ฌฌ 0 ๅโ€ๅ—๏ผŸไธไผš็š„๏ผŒไป–ไผš้—ฎโ€œๅ˜ฟ๏ผŒ่ฐๆ˜ฏ็ฌฌ 1 ๅ๏ผŸโ€\n\n่ฟ™ๆ˜ฏๅ› ไธบๅŠจ็‰ฉ็š„ๆฌกๅบๆ˜ฏๅพˆ้‡่ฆ็š„ใ€‚ๆฒกๆœ‰็ฌฌไธ€ไธชๅฐฑๆฒกๆœ‰็ฌฌไบŒไธช๏ผŒๆฒกๆœ‰็ฌฌไบŒไธชไนŸๆฒกๆœ‰็ฌฌไธ‰ไธชใ€‚\\\n็ฌฌ้›ถไธชๆ˜ฏไธๅญ˜ๅœจ็š„๏ผŒๅ› ไธบ้›ถ็š„ๆ„ๆ€ๆ˜ฏไป€ไนˆ้ƒฝๆฒกๆœ‰ใ€‚โ€œไป€ไนˆ้ƒฝๆฒกๆœ‰โ€ๆ€Žไนˆ่ตขๆฏ”่ต›ๅ˜›๏ผŒๅฎŒๅ…จไธๅˆ\\\n้€ป่พ‘ใ€‚่ฟ™ๆ ท็š„ๆ•ฐๅญ—ๆˆ‘ไปฌ็งฐไน‹ไธบโ€œๅบๆ•ฐ(ordinal number)โ€๏ผŒๅ› ไธบๅฎƒไปฌ่กจ็คบ็š„ๆ˜ฏไบ‹็‰ฉ็š„้กบๅบใ€‚\n\n่€Œ็จ‹ๅบๅ‘˜ไธ่ƒฝ็”จ่ฟ™็งๆ–นๅผๆ€่€ƒ้—ฎ้ข˜๏ผŒๅ› ไธบไป–ไปฌๅฏไปฅไปŽๅˆ—่กจ็š„ไปปไฝ•ไธ€ไธชไฝ็ฝฎๅ–ๅ‡บไธ€ไธชๅ…ƒ็ด ๆฅใ€‚\\\nๅฏน็จ‹ๅบๅ‘˜ๆฅ่ฏด๏ผŒไธŠ่ฟฐ็š„ๅˆ—่กจๆ›ดๅƒๆ˜ฏไธ€ๅ ๅก็‰‡ใ€‚ๅฆ‚ๆžœไป–ไปฌๆƒณ่ฆ tiger๏ผŒๅฐฑๆŠ“ๅฎƒๅ‡บๆฅ๏ผŒๅฆ‚ๆžœๆƒณ่ฆ\\\nzeebra๏ผŒไนŸไธ€ๆ ทๆŠ“ๅ–ๅ‡บๆฅใ€‚่ฆ้šๆœบๅœฐๆŠ“ๅ–ๅˆ—่กจ้‡Œ็š„ๅ†…ๅฎน๏ผŒๅˆ—่กจ็š„ๆฏไธ€ไธชๅ…ƒ็ด ้ƒฝๅบ”่ฏฅๆœ‰ไธ€ไธชๅœฐ\\\nๅ€๏ผŒๆˆ–่€…ไธ€ไธช \"index๏ผˆ็ดขๅผ•๏ผ‰\"๏ผŒ่€Œๆœ€ๅฅฝ็š„ๆ–นๅผๆ˜ฏไฝฟ็”จไปฅ 0 ๅผ€ๅคด็š„็ดขๅผ•ใ€‚็›ธไฟกๆˆ‘่ฏด็š„่ฟ™ไธ€็‚น\\\nๅง๏ผŒ่ฟ™็งๆ–นๅผ่Žทๅ–ๅ…ƒ็ด ไผšๆ›ดๅฎนๆ˜“ใ€‚่ฟ™็ฑป็š„ๆ•ฐๅญ—่ขซ็งฐไธบโ€œๅŸบๆ•ฐ(cardinal number)โ€๏ผŒๅฎƒๆ„ๅ‘ณ็€\\\nไฝ ๅฏไปฅไปปๆ„ๆŠ“ๅ–ๅ…ƒ็ด ๏ผŒๆ‰€ไปฅๆˆ‘ไปฌ้œ€่ฆไธ€ไธช 0 
ๅทๅ…ƒ็ด ใ€‚\n\n้‚ฃไนˆ๏ผŒ่ฟ™ไบ›็Ÿฅ่ฏ†ๅฏนไบŽไฝ ็š„ๅˆ—่กจๆ“ไฝœๆœ‰ไป€ไนˆๅธฎๅŠฉๅ‘ข๏ผŸๅพˆ็ฎ€ๅ•๏ผŒๆฏๆฌกไฝ ๅฏน่‡ชๅทฑ่ฏดโ€œๆˆ‘่ฆ็ฌฌ 3 ๅชๅŠจ็‰ฉโ€\\\nๆ—ถ๏ผŒไฝ ้œ€่ฆๅฐ†โ€œๅบๆ•ฐโ€่ฝฌๆขๆˆโ€œๅŸบๆ•ฐโ€๏ผŒๅช่ฆๅฐ†ๅ‰่€…ๅ‡ 1 ๅฐฑๅฏไปฅไบ†ใ€‚็ฌฌ 3 ๅชๅŠจ็‰ฉ็š„็ดขๅผ•ๆ˜ฏ 2๏ผŒ\\\nไนŸๅฐฑๆ˜ฏ penguinใ€‚็”ฑไบŽไฝ ไธ€่พˆๅญ้ƒฝๅœจ่ทŸๅบๆ•ฐๆ‰“ไบค้“๏ผŒๆ‰€ไปฅไฝ ้œ€่ฆ็”จ่ฟ™็งๆ–นๅผๆฅ่Žทๅพ—ๅŸบๆ•ฐ๏ผŒ\\\nๅช่ฆๅ‡ 1 ๅฐฑ้ƒฝๆžๅฎšไบ†ใ€‚\n\n่ฎฐไฝ: ordinal == ๆœ‰ๅบ๏ผŒไปฅ 1 ๅผ€ๅง‹๏ผ›cardinal == ้šๆœบ้€‰ๅ–, ไปฅ 0 ๅผ€ๅง‹ใ€‚\n\n่ฎฉๆˆ‘ไปฌ็ปƒไน ไธ€ไธ‹ใ€‚ๅฎšไน‰ไธ€ไธชๅŠจ็‰ฉๅˆ—่กจ๏ผŒ็„ถๅŽ่ทŸ็€ๅšๅŽ้ข็š„็ปƒไน ๏ผŒไฝ ้œ€่ฆๅ†™ๅ‡บๆ‰€ๆŒ‡ไฝ็ฝฎ็š„ๅŠจ็‰ฉ\\\nๅ็งฐใ€‚ๅฆ‚ๆžœๆˆ‘็”จ็š„ๆ˜ฏโ€œ1st, 2ndโ€็ญ‰่ฏดๆณ•๏ผŒ้‚ฃ่ฏดๆ˜Žๆˆ‘็”จ็š„ๆ˜ฏๅบๆ•ฐ๏ผŒๆ‰€ไปฅไฝ ้œ€่ฆๅ‡ๅŽป 1ใ€‚ๅฆ‚ๆžœๆˆ‘\\\n็ป™ไฝ ็š„ๆ˜ฏๅŸบๆ•ฐ๏ผˆ0, 1, 2๏ผ‰๏ผŒไฝ ๅช่ฆ็›ดๆŽฅไฝฟ็”จๅณๅฏใ€‚\n.. code-block:: python\n\n animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']\n\n1. The animal at 1.\n2. The 3rd animal.\n3. The 1st animal.\n4. The animal at 3.\n5. The 5th animal.\n6. The animal at 2.\n7. The 6th animal.\n8. The animal at 4.\n\nๅฏนไบŽไธŠ่ฟฐๆฏไธ€ๆก๏ผŒไปฅ่ฟ™ๆ ท็š„ๆ ผๅผๅ†™ๅ‡บไธ€ไธชๅฎŒๆ•ด็š„ๅฅๅญ๏ผšโ€œThe 1st animal is at 0 and is a bear.โ€\n็„ถๅŽๅ€’่ฟ‡ๆฅๅฟต๏ผšโ€œThe animal at 0 is the 1st animal and is a bear.โ€\n\nไฝฟ็”จ python ๆฃ€ๆŸฅไฝ ็š„็ญ”ๆกˆใ€‚\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ไธŠ็ฝ‘ๆœ็ดขไธ€ไธ‹ๅ…ณไบŽๅบๆ•ฐ(ordinal number)ๅ’ŒๅŸบๆ•ฐ(cardinal number)็š„็Ÿฅ่ฏ†ๅนถ้˜…่ฏปไธ€ไธ‹ใ€‚\n2. ไปฅไฝ ๅฏนไบŽ่ฟ™ไบ›ๆ•ฐๅญ—็ฑปๅž‹็š„ไบ†่งฃ๏ผŒ่งฃ้‡Šไธ€ไธ‹ไธบไป€ไนˆไปŠๅนดๆ˜ฏ 2010 ๅนดใ€‚ๆ็คบ๏ผšไฝ ไธ่ƒฝ้šไพฟๆŒ‘้€‰\\\n ๅนดไปฝใ€‚\n3. ๅ†ๅ†™ไธ€ไบ›ๅˆ—่กจ๏ผŒ็”จไธ€ๆ ท็š„ๆ–นๅผไฝœๅ‡บ็ดขๅผ•๏ผŒ็กฎ่ฎค่‡ชๅทฑๅฏไปฅๅœจไธค็งๆ•ฐๅญ—ไน‹้—ดไบ’็›ธ็ฟป่ฏ‘ใ€‚\n4. ไฝฟ็”จ python ๆฃ€ๆŸฅ่‡ชๅทฑ็š„็ญ”ๆกˆใ€‚\n\n.. 
warning::\n\n ไผšๆœ‰็จ‹ๅบๅ‘˜ๅ‘Š่ฏ‰ไฝ ่ฎฉไฝ ๅŽป้˜…่ฏปไธ€ไธชๅซโ€œDijkstraโ€็š„ไบบๅ†™็š„ๅ…ณไบŽๆ•ฐๅญ—็š„่ฏ้ข˜ใ€‚ๆˆ‘ๅปบ่ฎฎไฝ ่ฟ˜ๆ˜ฏ\\\n ไธ่ฏปไธบๅฆ™ใ€‚้™ค้žไฝ ๅ–œๆฌขๅฌไธ€ไธชๅœจ็ผ–็จ‹่ฟ™ไธ€่กŒๅˆšๅ…ด่ตทๆ—ถๅฐฑๅœๆญขไปŽไบ‹็ผ–็จ‹ไบ†็š„ไบบๅฏนไฝ ๅคงๅ–Šๅคงๅซใ€‚\n\n\n" }, { "alpha_fraction": 0.7387486100196838, "alphanum_fraction": 0.7420417070388794, "avg_line_length": 49.55555725097656, "blob_id": "13f33d29c947ff6960668f1418b0d9f2f5272420", "content_id": "c6e3f121d95f17471a71e09ce5f3e54aac77e225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 79, "num_lines": 36, "path": "/ex43.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 43: You Make A Game\n=============================\n\nYou need to start learning to feed yourself. Hopefully as you have worked\nthrough this book, you have learned that all the information you need is on the\ninternet, you just have to go search for it. The only thing you have been\nmissing are the right words and what to look for when you search. Now you\nshould have a sense of it, so it's about time you struggled through a big\nproject and tried to get it working.\n\nHere are your requirements:\n\n1. Make a different game from the one I made.\n2. Use more than one file, and use ``import`` to use them. Make\n sure you know what that is.\n3. Use *one class per room* and give the classes names that fit\n their purpose. Like ``GoldRoom``, ``KoiPondRoom``.\n4. Your runner will need to know about these rooms, so make a class that runs\n them and knows about them. There's plenty of ways to do this, but consider\n having each room return what room is next or setting a variable of what\n room is next.\n\nOther than that I leave it to you. Spend a whole week on this and\nmake it the best game you can. Use classes, functions, dicts, lists anything\nyou can to make it nice. 
The purpose of this lesson is to teach you how to \nstructure classes that need other classes inside other files.\n\nRemember, I'm not telling you *exactly* how to do this because you have to\ndo this yourself. Go figure it out. Programming is problem solving, and\nthat means trying things, experimenting, failing, scrapping your work, and\ntrying again. When you get stuck, ask for help and show people your code.\nIf they are mean to you, ignore them, focus on the people who are not mean\nand offer to help. Keep working it and cleaning it until it's good, then\nshow it some more.\n\nGood luck, and see you in a week with your game.\n\n\n" }, { "alpha_fraction": 0.6854220032691956, "alphanum_fraction": 0.695652186870575, "avg_line_length": 30.239999771118164, "blob_id": "b9751490e1013b058847f4dd573d39be8c2c7ca0", "content_id": "cfd63a400f82a99df13b087a5c2a0baf3a2ffe95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 782, "license_type": "no_license", "max_line_length": 105, "num_lines": 25, "path": "/ex24.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 24: More Practice\n**************************\n\nYou are getting to the end of this section. You should have enough\nPython \"under your fingers\" to move onto learning about how programming\nreally works, but you should do some more practice. This exercise is\nlonger and all about building up stamina. The next exercise will be\nsimilar. Do them, get them exactly right, and do your checks.\n\n.. literalinclude:: ex/ex24.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex24.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Make sure to do your checks: read it backwards, read it out loud, put comments above confusing parts.\n2. Break the file on purpose, then run it to see what kinds of errors you get. 
Make sure you can fix it.\n\n" }, { "alpha_fraction": 0.6924606561660767, "alphanum_fraction": 0.6971614360809326, "avg_line_length": 40.89393997192383, "blob_id": "0b02fb4383b31a7ba1efb38d3cd57095b77ecc53", "content_id": "305bf4ce5e6676e21ca63816956fa522d0ed1cdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5531, "license_type": "no_license", "max_line_length": 96, "num_lines": 132, "path": "/ex42.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 42: Gothons Are Getting Classy\n***************************************\n\nWhile it's fun to put functions inside of dictionaries, you'd think there'd\nbe something in Python that does this for you. There is: the ``class``\nkeyword. Using ``class`` is how you create an even more awesome \"dict with functions\"\nthan the one you made in the last exercise. Classes have all sorts of powerful\nfeatures and uses that I could never go into in this book. Instead, you'll just\nuse them like they're fancy dictionaries with functions.\n\nA programming language that uses classes is called \"Object Oriented\nProgramming\". This is an old style of programming where you make \"things\" and\nyou \"tell\" those things to do work. You've been doing a lot of this. A whole\nlot. You just didn't know it. Remember when you were doing this:\n\n.. code-block:: python\n\n stuff = ['Test', 'This', 'Out']\n print ' '.join(stuff)\n\nYou were actually using classes. The variable ``stuff`` is actually a ``list class``.\nThe ``' '.join(stuff)`` is calling the ``join`` function of the string ``' '``\n(just an empty space) is *also* a class, a string class. It's all classes!\n\nWell, and objects, but let's just skip that word for now. You'll learn what\nthose are after you make some classes. How do you make classes? Very similar\nto how you made the ``ROOMS`` dict, but easier:\n\n.. 
code-block:: python\n\n class TheThing(object):\n\n def __init__(self):\n self.number = 0\n\n def some_function(self):\n print \"I got called.\"\n\n def add_me_up(self, more):\n self.number += more\n return self.number\n\n # two different things\n a = TheThing()\n b = TheThing()\n\n a.some_function()\n b.some_function()\n\n print a.add_me_up(20)\n print a.add_me_up(20)\n print b.add_me_up(30)\n print b.add_me_up(30)\n\n print a.number\n print b.number\n\n.. warning::\n\n Alright, this is where you start learning about \"warts\". Python is an old\n language with lots of really ugly obnoxious pieces that were bad decisions.\n To cover up these bad decisions they make new bad decisions and then yell\n at people to adopt the new bad decisions. The phrase ``class\n TheThing(object)`` is an example of a bad decision. I won't get into it\n right here, but don't worry about why your class has to have\n ``(object)`` after its name. Just always type it this way or other\n Python programmers will yell at you. We'll get into why later.\n\nYou see that ``self`` in the parameters? You know what that is? That's right,\nit's the \"extra\" parameter that Python creates so you can type\n``a.some_function()`` and then it will translate *that* to really be\n``some_function(a)``. Why use ``self``? Your function has no idea what you\nare calling any one \"instance\" of ``TheThing`` or another, you just use a\ngeneric name ``self``. That way you can write your function and it will always\nwork.\n\nYou could actually use another name rather than ``self`` but then every Python\nprogrammer on the planet would hate you, so don't. Only jerks change things\nlike that and I taught you better. Be nice to people who have to read what you\nwrite because ten years later all code is horrible.\n\nNext, see the ``__init__`` function? That is how you set up a Python class with\ninternal variables. You can set them on ``self`` with the ``.`` (period) just\nlike I'll show you here. 
See also how we then use this in ``add_me_up()`` later\nwhich lets you add to the ``self.number`` you created. Later you can see how\nwe use this to add to our number and print it.\n\nClasses are very powerful, so you should read everything\nyou can about them and play with them. You actually know how to use them, you just have\nto try it. In fact, I want to play some guitar right now so I'm not going\nto give you an exercise to type. You're going to write an exercise\nusing classes.\n\nHere's how we'd do exercise 41 using classes instead of the thing\nwe created:\n\n.. literalinclude:: ex/ex42.py\n :linenos:\n\n\nWhat You Should See\n===================\n\nThe output from this version of the game should be exactly the same as the\nprevious version. In fact you'll notice that some of the code is nearly\nthe same. Compare this new version of the game with\nthe last one so you understand the changes that were made. Key things to\nreally get are:\n\n1. How you made a ``class Game(object)`` and put functions inside it.\n2. How ``__init__`` is a special intialization method that sets up important variables.\n3. How you added functions *to* the class by indenting them so they were deeper under\n the ``class`` keyword. This is important so study carefully how indentation creates\n the class structure.\n4. How you indented again to put the contents of the functions under their names.\n5. How colons are being used.\n6. The concept of ``self`` and how it's used in ``__init__``, ``play``,\n and ``death``.\n7. Go find out what ``getattr`` does inside ``play`` so that you \n understand what's going on with the operation of ``play``. In fact, try\n doing this by hand inside Python to really get it.\n8. How a ``Game`` was created at the end and then told to ``play()`` and how that\n got everything started.\n\n\nExtra Credit\n============\n\n1. Find out what the ``__dict__`` is and figure out how to get at it.\n2. Add some rooms to make sure you know how to work with a class.\n3. 
Create a two-class version of this, where one is the ``Map`` and the other is the ``Engine``.\n Hint: ``play`` goes in the ``Engine``.\n\n" }, { "alpha_fraction": 0.5428156852722168, "alphanum_fraction": 0.5435413718223572, "avg_line_length": 16.43037986755371, "blob_id": "444ce506b470df41a2b98fd0fa6b84527dadf427", "content_id": "d5e42c29c9515b2e30380bc4b474c224421b81b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2756, "license_type": "no_license", "max_line_length": 79, "num_lines": 158, "path": "/ex37.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 37: Symbol Review\n**************************\n\nIt's time to review the symbols and Python words you know, and to try\nto pick up a few more for the next few lessons. What I've done here\nis written out all the Python symbols and keywords that are important\nto know.\n\nIn this lesson take each keyword, and first try to write out \nwhat it does from memory. Next, search online for it and see what\nit really does. It may be hard because some of these are going to\nbe impossible to search for, but keep trying.\n\nIf you get one of these wrong from memory, write up an index card with the\ncorrect definition and try to \"correct\" your memory. If you just didn't know\nabout it, write it down, and save it for later.\n\nFinally, use each of these in a small Python program, or as many as\nyou can get done. 
The key here is to find out what the symbol does,\nmake sure you got it right, correct it if you do not, then use it\nto lock it in.\n\n\nKeywords\n========\n\n* ``and``\n* ``del``\n* ``from``\n* ``not``\n* ``while``\n* ``as``\n* ``elif``\n* ``global``\n* ``or``\n* ``with``\n* ``assert``\n* ``else``\n* ``if``\n* ``pass``\n* ``yield``\n* ``break``\n* ``except``\n* ``import``\n* ``print``\n* ``class``\n* ``exec``\n* ``in``\n* ``raise``\n* ``continue``\n* ``finally``\n* ``is``\n* ``return``\n* ``def``\n* ``for``\n* ``lambda``\n* ``try``\n\n\nData Types\n==========\n\nFor data types, write out what makes up each one. For example, with strings\nwrite out how you create a string. For numbers write out a few numbers.\n\n* ``True``\n* ``False``\n* ``None``\n* ``strings``\n* ``numbers``\n* ``floats``\n* ``lists``\n\n\nString Escapes Sequences\n========================\n\nFor string escape sequences, use them in strings to make sure they do\nwhat you think they do.\n\n* ``\\\\``\n* ``\\'``\n* ``\\\"``\n* ``\\a``\n* ``\\b``\n* ``\\f``\n* ``\\n``\n* ``\\r``\n* ``\\t``\n* ``\\v``\n\nString Formats\n==============\n\nSame thing for string formats: use them in some strings to know what\nthey do.\n\n\n* ``%d``\n* ``%i``\n* ``%o``\n* ``%u``\n* ``%x``\n* ``%X``\n* ``%e``\n* ``%E``\n* ``%f``\n* ``%F``\n* ``%g``\n* ``%G``\n* ``%c``\n* ``%r``\n* ``%s``\n* ``%%``\n\n\nOperators\n=========\n\nSome of these may be unfamiliar to you, but look them up anyway. Find out what\nthey do, and if you still can't figure it out, save it for later.\n\n* ``+``\n* ``-``\n* ``*``\n* ``**``\n* ``/``\n* ``//``\n* ``%``\n* ``<``\n* ``>``\n* ``<=``\n* ``>=``\n* ``==``\n* ``!=``\n* ``<>``\n* ``( )``\n* ``[ ]``\n* ``{ }``\n* ``@``\n* ``,``\n* ``:``\n* ``.``\n* ``=``\n* ``;``\n* ``+=``\n* ``-=``\n* ``*=``\n* ``/=``\n* ``//=``\n* ``%=``\n* ``**=``\n\n\nSpend about a week on this, but if you finish faster that's great. 
The point\nis to try to get coverage on all these symbols and make sure they are locked in\nyour head. What's also important is to find out what you *do not* know so you\ncan fix it later.\n\n\n" }, { "alpha_fraction": 0.5515320301055908, "alphanum_fraction": 0.5626741051673889, "avg_line_length": 17.842105865478516, "blob_id": "e4383e95de0d630fdf41ada8480aa89da4f16a3f", "content_id": "f962ed4db4b2873ecdc39c0451247ba4802acb24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 359, "license_type": "no_license", "max_line_length": 79, "num_lines": 19, "path": "/ex9.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 9: Printing, Printing, Printing\n****************************************\n\n.. literalinclude:: ex/ex9.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex9.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Do your checks of your work, write down your mistakes, try not to make them \n on the next exercise.\n\n" }, { "alpha_fraction": 0.6853994727134705, "alphanum_fraction": 0.7035812735557556, "avg_line_length": 27.34375, "blob_id": "0874a809b5da08b9597de293eeb17e324b35b6c3", "content_id": "937ce441e1dd23b53f5cba39a9d098ecffb28e47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3877, "license_type": "no_license", "max_line_length": 62, "num_lines": 64, "path": "/cn/ex15.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 15: ่ฏปๅ–ๆ–‡ไปถ\n**************************\n\nไฝ ๅทฒ็ปๅญฆ่ฟ‡ไบ† ``raw_input`` ๅ’Œ ``argv``\\๏ผŒ่ฟ™ไบ›ๆ˜ฏไฝ ๅผ€ๅง‹ๅญฆไน ่ฏปๅ–ๆ–‡ไปถ็š„ๅฟ…ๅค‡ๅŸบ็ก€ใ€‚ไฝ \\\nๅฏ่ƒฝ้œ€่ฆๅคšๅคšๅฎž้ชŒๆ‰่ƒฝๆ˜Ž็™ฝๅฎƒ็š„ๅทฅไฝœๅŽŸ็†๏ผŒๆ‰€ไปฅไฝ ่ฆ็ป†ๅฟƒๅš็ปƒไน ๏ผŒๅนถไธ”ไป”็ป†ๆฃ€ๆŸฅ็ป“ๆžœใ€‚ๅค„็†\\\nๆ–‡ไปถ้œ€่ฆ้žๅธธไป”็ป†๏ผŒๅฆ‚ๆžœไธไป”็ป†็š„่ฏ๏ผŒไฝ 
ๅฏ่ƒฝไผšๅงๆœ‰็”จ็š„ๆ–‡ไปถๅผ„ๅๆˆ–่€…ๆธ…็ฉบใ€‚ๅฏผ่‡ดๅ‰ๅŠŸๅฐฝๅผƒใ€‚\n\n่ฟ™่Š‚็ปƒไน ๆถ‰ๅŠๅˆฐๅ†™ไธคไธชๆ–‡ไปถใ€‚ไธ€ไธชๆญฃๅธธ็š„ ``ex15.py`` ๆ–‡ไปถ๏ผŒๅฆๅค–ไธ€ไธชๆ˜ฏ ``ex15_sample.txt``\\๏ผŒ\\\n็ฌฌไบŒไธชๆ–‡ไปถๅนถไธๆ˜ฏ่„šๆœฌ๏ผŒ่€Œๆ˜ฏไพ›ไฝ ็š„่„šๆœฌ่ฏปๅ–็š„ๆ–‡ๆœฌๆ–‡ไปถใ€‚ไปฅไธ‹ๆ˜ฏๅŽ่€…็š„ๅ†…ๅฎน๏ผš\n\n.. literalinclude:: ex/ex15_sample.txt\n\nๆˆ‘ไปฌ่ฆๅš็š„ๆ˜ฏๆŠŠ่ฏฅๆ–‡ไปถ็”จๆˆ‘ไปฌ็š„่„šๆœฌโ€œๆ‰“ๅผ€(open)โ€๏ผŒ็„ถๅŽๆ‰“ๅฐๅ‡บๆฅใ€‚็„ถ่€ŒๆŠŠๆ–‡ไปถๅ\\\n``ex15_sample.txt`` ๅ†™ๆญป(hardcode)ๅœจไปฃ็ ไธญไธๆ˜ฏไธ€ไธชๅฅฝไธปๆ„๏ผŒ่ฟ™ไบ›ไฟกๆฏๅบ”่ฏฅๆ˜ฏ็”จๆˆท\\\n่พ“ๅ…ฅ็š„ๆ‰ๅฏนใ€‚ๅฆ‚ๆžœๆˆ‘ไปฌ็ขฐๅˆฐๅ…ถไป–ๆ–‡ไปถ่ฆๅค„็†๏ผŒๅ†™ๆญป็š„ๆ–‡ไปถๅๅฐฑไผš็ป™ไฝ ๅธฆๆฅ้บป็ƒฆไบ†ใ€‚ๆˆ‘ไปฌ็š„\\\n่งฃๅ†ณๆ–นๆกˆๆ˜ฏไฝฟ็”จ ``argv`` ๅ’Œ ``raw_input`` ๆฅไปŽ็”จๆˆท่Žทๅ–ไฟกๆฏ๏ผŒไปŽ่€Œ็Ÿฅ้“ๅ“ชไบ›ๆ–‡ไปถ่ฏฅ\\\n่ขซๅค„็†ใ€‚\n\n.. literalinclude:: ex/ex15.py\n :linenos:\n\n่ฟ™ไธช่„šๆœฌไธญๆœ‰ไธ€ไบ›ๆ–ฐๅฅ‡็š„็Žฉๆ„๏ผŒๆˆ‘ไปฌๆฅๅฟซ้€Ÿๅœฐ่ฟ‡ไธ€้๏ผš\n\nไปฃ็ ็š„ 1-3 ่กŒไฝฟ็”จ ``argv`` ๆฅ่Žทๅ–ๆ–‡ไปถๅ๏ผŒ่ฟ™ไธชไฝ ๅบ”่ฏฅๅทฒ็ป็†Ÿๆ‚‰ไบ†ใ€‚ๆŽฅไธ‹ๆฅ็ฌฌ 5 ่กŒๆˆ‘\\\nไปฌ็œ‹ๅˆฐ ``open`` ่ฟ™ไธชๆ–ฐๅ‘ฝไปคใ€‚็Žฐๅœจ่ฏทๅœจๅ‘ฝไปค่กŒ่ฟ่กŒ ``pydoc open`` ๆฅ่ฏป่ฏปๅฎƒ็š„่ฏดๆ˜Žใ€‚\\\nไฝ ๅฏไปฅ็œ‹ๅˆฐๅฎƒๅ’Œไฝ ่‡ชๅทฑ็š„่„šๆœฌใ€ๆˆ–่€… ``raw_input`` ๅ‘ฝไปค็ฑปไผผ๏ผŒๅฎƒไผšๆŽฅๅ—ไธ€ไธชๅ‚ๆ•ฐ๏ผŒๅนถ\\\nไธ”่ฟ”ๅ›žไธ€ไธชๅ€ผ๏ผŒไฝ ๅฏไปฅๅฐ†่ฟ™ไธชๅ€ผ่ต‹ไบˆไธ€ไธชๅ˜้‡ใ€‚่ฟ™ๅฐฑๆ˜ฏไฝ ๆ‰“ๅผ€ๆ–‡ไปถ็š„่ฟ‡็จ‹ใ€‚\n\n็ฌฌ 7 ่กŒๆˆ‘ไปฌๆ‰“ๅฐไบ†ไธ€ๅฐ่กŒ๏ผŒไฝ†ๅœจ็ฌฌ 8 ่กŒๆˆ‘ไปฌ็œ‹ๅˆฐไบ†ๆ–ฐๅฅ‡็š„ไธœ่ฅฟใ€‚ๆˆ‘ไปฌๅœจ ``txt`` ไธŠ่ฐƒ\\\n็”จไบ†ไธ€ไธชๅ‡ฝๆ•ฐใ€‚ไฝ ไปŽ open ่Žทๅพ—็š„ไธœ่ฅฟๆ˜ฏไธ€ไธช ``file`` (ๆ–‡ไปถ)๏ผŒๆ–‡ไปถๆœฌ่บซไนŸๆ”ฏๆŒไธ€ไบ›\\\nๅ‘ฝไปคใ€‚ๅฎƒๆŽฅๅ—ๅ‘ฝไปค็š„ๆ–นๅผๆ˜ฏไฝฟ็”จๅฅ็‚น ``.`` (่‹ฑๆ–‡็งฐไฝœ dot ๆˆ–่€… period)๏ผŒ็ดง่ทŸ็€ไฝ ็š„\\\nๅ‘ฝไปค๏ผŒ็„ถๅŽๆ˜ฏ็ฑปไผผ ``open`` ๅ’Œ ``raw_input`` ไธ€ๆ ท็š„ๅ‚ๆ•ฐใ€‚ไธๅŒ็‚นๆ˜ฏ๏ผšๅฝ“ไฝ ่ฏด\n``txt.read`` ๆ—ถ๏ผŒไฝ ็š„ๆ„ๆ€ๅ…ถๅฎžๆ˜ฏ๏ผšโ€œๅ˜ฟ txt๏ผๆ‰ง่กŒไฝ ็š„ read ๅ‘ฝไปค๏ผŒๆ— ้œ€ไปปไฝ•ๅ‚ๆ•ฐ๏ผโ€\n\n่„šๆœฌๅ‰ฉไธ‹็š„้ƒจๅˆ†ๅŸบๆœฌๅทฎไธๅคš๏ผŒไธ่ฟ‡ๆˆ‘ๅฐฑๆŠŠๅ‰ฉไธ‹็š„ๅˆ†ๆžไฝœไธบๅŠ ๅˆ†ไน ้ข˜็•™็ป™ไฝ 
่‡ชๅทฑไบ†ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๆˆ‘่ฎฉ่„šๆœฌ่ฏปๅ–็š„ๆ ทๆœฌๆ–‡ไปถๅซ \"ex15_sample.txt\"๏ผŒไปฅไธ‹ๆ˜ฏๆ‰ง่กŒ็ป“ๆžœ๏ผš\n\n.. literalinclude:: ex/ex15.txt\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n่ฟ™่Š‚็š„้šพๅบฆ่ทจ่ถŠๆœ‰็‚นๅคง๏ผŒๆ‰€ไปฅไฝ ่ฆๅฐฝ้‡ๅšๅฅฝ่ฟ™่Š‚ๅŠ ๅˆ†ไน ้ข˜๏ผŒ็„ถๅŽๅ†็ปง็ปญๅŽ้ข็š„็ซ ่Š‚ใ€‚\n\n1. ๅœจๆฏไธ€่กŒ็š„ไธŠ้ข็”จๆณจ่งฃ่ฏดๆ˜Ž่ฟ™ไธ€่กŒ็š„็”จ้€”ใ€‚\n2. ๅฆ‚ๆžœไฝ ไธ็กฎๅฎš็ญ”ๆกˆ๏ผŒๅฐฑ้—ฎๅˆซไบบ๏ผŒๆˆ–่€…ไธŠ็ฝ‘ๆœ็ดขใ€‚ๅคง้ƒจๅˆ†ๆ—ถๅ€™๏ผŒๅช่ฆๆœ็ดข \"python\" ๅŠ ไธŠ\\\n   ไฝ ่ฆๆœ็š„ไธœ่ฅฟๅฐฑ่ƒฝๅพ—ๅˆฐไฝ ่ฆ็š„็ญ”ๆกˆใ€‚ๆฏ”ๅฆ‚ๆœ็ดขไธ€ไธ‹โ€œpython openโ€ใ€‚\n3. ๆˆ‘ไฝฟ็”จไบ†โ€œๅ‘ฝไปคโ€่ฟ™ไธช่ฏ๏ผŒไธ่ฟ‡ๅฎž้™…ไธŠๅฎƒไปฌ็š„ๅๅญ—ๆ˜ฏโ€œๅ‡ฝๆ•ฐ๏ผˆfunction๏ผ‰โ€ๅ’Œโ€œๆ–นๆณ•๏ผˆmethod๏ผ‰ใ€‚\\\n   ไธŠ็ฝ‘ๆœ็ดขไธ€ไธ‹่ฟ™ไธค่€…็š„ๆ„ไน‰ๅ’ŒๅŒบๅˆซใ€‚็œ‹ไธๆ˜Ž็™ฝไนŸๆฒกๅ…ณ็ณป๏ผŒ่ฟทๅคฑๅœจๅˆซ็š„็จ‹ๅบๅ‘˜็š„็Ÿฅ่ฏ†ๆตทๆด‹\\\n   ้‡Œๆ˜ฏๅพˆๆญฃๅธธ็š„ไธ€ไปถไบ‹ๆƒ…ใ€‚\n4. ๅˆ ๆމ 10-15 ่กŒไฝฟ็”จๅˆฐ ``raw_input`` ็š„้ƒจๅˆ†๏ผŒๅ†่ฟ่กŒไธ€้่„šๆœฌใ€‚\n5. ๅชๆ˜ฏ็”จ ``raw_input`` ๅ†™่ฟ™ไธช่„šๆœฌ๏ผŒๆƒณๆƒณ้‚ฃ็งๅพ—ๅˆฐๆ–‡ไปถๅ็งฐ็š„ๆ–นๆณ•ๆ›ดๅฅฝ๏ผŒไปฅๅŠไธบไป€ไนˆใ€‚\n6. ่ฟ่กŒ ``pydoc file`` ๅ‘ไธ‹ๆปšๅŠจ็›ดๅˆฐ็œ‹่ง ``read()`` ๅ‘ฝไปค๏ผˆๅ‡ฝๆ•ฐ/ๆ–นๆณ•๏ผ‰ใ€‚็œ‹ๅˆฐๅพˆๅคš\\\n   ๅˆซ็š„ๅ‘ฝไปคไบ†ๅง๏ผŒไฝ ๅฏไปฅๆ‰พๅ‡ ๆก่ฏ•่ฏ•็œ‹ใ€‚ไธ้œ€่ฆ็œ‹้‚ฃไบ›ๅŒ…ๅซ ``__`` ๏ผˆไธคไธชไธ‹ๅˆ’็บฟ๏ผ‰็š„\\\n   ๅ‘ฝไปค๏ผŒ่ฟ™ไบ›ๅชๆ˜ฏๅžƒๅœพ่€Œๅทฒใ€‚\n7. ๅ†ๆฌก่ฟ่กŒ ``python`` ๅœจๅ‘ฝไปค่กŒไธ‹ไฝฟ็”จ ``open`` ๆ‰“ๅผ€ไธ€ไธชๆ–‡ไปถ๏ผŒ่ฟ™็ง open ๅ’Œ read\n   ็š„ๆ–นๆณ•ไนŸๅ€ผๅพ—ไฝ ไธ€ๅญฆใ€‚\n8. 
่ฎฉไฝ ็š„่„šๆœฌ้’ˆๅฏน ``txt`` and ``txt_again`` ๅ˜้‡ๆ‰ง่กŒไธ€ไธ‹ ``close()`` ๏ผŒๅค„็†ๅฎŒ\\\n ๆ–‡ไปถๅŽไฝ ้œ€่ฆๅฐ†ๅ…ถๅ…ณ้—ญ๏ผŒ่ฟ™ๆ˜ฏๅพˆ้‡่ฆ็š„ไธ€็‚นใ€‚\n\n" }, { "alpha_fraction": 0.6400580406188965, "alphanum_fraction": 0.6545718312263489, "avg_line_length": 18.11111068725586, "blob_id": "68087b8e94de1bdf5bff01bcb408bd891a648a7d", "content_id": "731a78db804bfb187e311b7401ba428f17374654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 54, "num_lines": 36, "path": "/cn/ex14.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 14: ๆ็คบๅ’Œไผ ้€’\n**********************************\n\n่ฎฉๆˆ‘ไปฌไฝฟ็”จ ``argv`` ๅ’Œ ``raw_input`` ไธ€่ตทๆฅๅ‘็”จๆˆทๆไธ€ไบ›็‰นๅˆซ็š„้—ฎ้ข˜ใ€‚ไธ‹ไธ€่Š‚ไน ้ข˜\\\nไฝ ไผšๅญฆไน ๅฆ‚ไฝ•่ฏปๅ†™ๆ–‡ไปถ๏ผŒ่ฟ™่Š‚็ปƒไน ๆ˜ฏไธ‹่Š‚็š„ๅŸบ็ก€ใ€‚ๅœจ่ฟ™้“ไน ้ข˜้‡Œๆˆ‘ไปฌๅฐ†็”จ็•ฅๅพฎไธๅŒ็š„ๆ–นๆณ•\\\nไฝฟ็”จ ``raw_input``\\๏ผŒ่ฎฉๅฎƒๆ‰“ๅ‡บไธ€ไธช็ฎ€ๅ•็š„ ``>`` ไฝœไธบๆ็คบ็ฌฆใ€‚่ฟ™ๅ’Œไธ€ไบ›ๆธธๆˆไธญ็š„ๆ–นๅผ\\\n็ฑปไผผ๏ผŒไพ‹ๅฆ‚ Zork ๆˆ–่€… Adventure ่ฟ™ไธคๆฌพๆธธๆˆใ€‚\n\n\n.. literalinclude:: ex/ex14.py\n :linenos:\n\n\nๆˆ‘ไปฌๅฐ†็”จๆˆทๆ็คบ็ฌฆ่ฎพ็ฝฎไธบๅ˜้‡ ``prompt``\\๏ผŒ่ฟ™ๆ ทๆˆ‘ไปฌๅฐฑไธ้œ€่ฆๅœจๆฏๆฌก็”จๅˆฐ ``raw_input`` \nๆ—ถ้‡ๅค่พ“ๅ…ฅๆ็คบ็”จๆˆท็š„ๅญ—็ฌฆไบ†ใ€‚่€Œไธ”ๅฆ‚ๆžœไฝ ่ฆๅฐ†ๆ็คบ็ฌฆไฟฎๆ”นๆˆๅˆซ็š„ๅญ—ไธฒ๏ผŒไฝ ๅช่ฆๆ”นไธ€ไธชไฝ็ฝฎ\\\nๅฐฑๅฏไปฅไบ†ใ€‚\n\n้žๅธธ้กบๆ‰‹ๅงใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๅฝ“ไฝ ่ฟ่กŒ่ฟ™ไธช่„šๆœฌๆ—ถ๏ผŒ่ฎฐไฝไฝ ้œ€่ฆๆŠŠไฝ ็š„ๅๅญ—่ต‹็ป™่ฟ™ไธช่„šๆœฌ๏ผŒ่ฎฉ argv ๅ‚ๆ•ฐๆŽฅๆ”ถๅˆฐไฝ ็š„ๅ็งฐใ€‚\n\n.. literalinclude:: ex/ex14.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๆŸฅไธ€ไธ‹ Zork ๅ’Œ Adventure ๆ˜ฏไธคไธชๆ€Žๆ ท็š„ๆธธๆˆใ€‚\n ็œ‹็œ‹่ƒฝไธ่ƒฝไธ‹่ฝฝๅˆฐไธ€็‰ˆ๏ผŒ็„ถๅŽ็Žฉ็Žฉ็œ‹ใ€‚\n2. ๅฐ† ``prompt`` ๅ˜้‡ๆ”นๆˆๅฎŒๅ…จไธๅŒ็š„ๅ†…ๅฎนๅ†่ฟ่กŒไธ€้ใ€‚\n3. ็ป™ไฝ ็š„่„šๆœฌๅ†ๆทปๅŠ ไธ€ไธชๅ‚ๆ•ฐ๏ผŒ่ฎฉไฝ ็š„็จ‹ๅบ็”จๅˆฐ่ฟ™ไธชๅ‚ๆ•ฐใ€‚\n4. 
็กฎ่ฎคไฝ ๅผ„ๆ‡‚ไบ†ไธ‰ไธชๅผ•ๅท ``\"\"\"`` ๅฏไปฅๅฎšไน‰ๅคš่กŒๅญ—็ฌฆไธฒ๏ผŒ่€Œ ``%`` ๆ˜ฏๅญ—็ฌฆไธฒ็š„ๆ ผๅผๅŒ–ๅทฅๅ…ทใ€‚\n\n" }, { "alpha_fraction": 0.7024303674697876, "alphanum_fraction": 0.7071725130081177, "avg_line_length": 37.29545593261719, "blob_id": "244fce8ef5bbc54aabf25036ca5678a4ccb8f690", "content_id": "acfc77928b2d66cb43a27f363de7aaeb02bb2349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1687, "license_type": "no_license", "max_line_length": 107, "num_lines": 44, "path": "/ex5.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 5: More Variables And Printing\n***************************************\n\nNow we'll do even more typing of variables and printing them out. This time\nwe'll use something called a \"format string\". Every time you put ``\"``\n(double-quotes) around a piece of text you have been making a *string*. A string\nis how you make something that your program might give to a human. You print\nthem, save them to files, send them to web servers, all sorts of things.\n\nStrings are really handy, so in this exercise you will learn how to\nmake strings that have variables embedded in them. You embed variables\ninside a string by using specialized format sequences and then putting\nthe variables at the end with a special syntax that tells Python, \"Hey,\nthis is a format string, put these variables in there.\"\n\nAs usual, just type this in even if you do not understand it and make it\nexactly the same.\n\n.. literalinclude:: ex/ex5.py\n :linenos:\n\n\n.. warning::\n\n Remember to put ``# -- coding: utf-8 --`` at the top if you use non-ASCII\n characters and get an encoding error.\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex5.txt\n :language: console\n\n\n\nExtra Credit\n============\n\n1. Change all the variables so there isn't the ``my_`` in front. 
\n Make sure you change the name everywhere, not just where you used ``=`` to set them.\n2. Try more format characters. ``%r`` is a very useful one. It's like saying \"print this no matter what\".\n3. Search online for all of the Python format characters.\n4. Try to write some variables that convert the inches and pounds to centimeters and kilos.\n Do not just type in the measurements. Work out the math in Python.\n\n\n" }, { "alpha_fraction": 0.7763041853904724, "alphanum_fraction": 0.7774830460548401, "avg_line_length": 53.69355010986328, "blob_id": "29c37c5bd983364af80ab79c0dabc85746f3cf21", "content_id": "5fbfebe7b3b30a437f2567a5502d941ea7b377d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3393, "license_type": "no_license", "max_line_length": 124, "num_lines": 62, "path": "/advice.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Advice From An Old Programmer\n*****************************\n\nYou've finished this book and have decided to continue with programming.\nMaybe it will be a career for you, or maybe it will be a hobby. You'll need\nsome advice to make sure you continue on the right path, and get the most\nenjoyment out of your newly chosen activity.\n\nI've been programming for a very long time. So long that it's incredibly\nboring to me. At the time that I wrote this book, I knew about 20 programming\nlanguages and could learn new ones in about a day to a week depending on how\nweird they were. Eventually though this just became boring and couldn't hold\nmy interest anymore. This doesn't mean I think programming *is* boring, or\nthat *you* will think it's boring, only that *I* find it uninteresting\nat this point in my journey.\n\nWhat I discovered after this journey of learning is that it's not the languages that\nmatter but what you do with them. Actually, I always knew that, but I'd\nget distracted by the languages and forget it periodically. 
Now I never forget\nit, and neither should you.\n\nWhich programming language you learn and use doesn't matter. Do *not* get\nsucked into the religion surrounding programming languages as that will only\nblind you to their true purpose of being your tool for doing interesting\nthings.\n\nProgramming as an intellectual activity is the *only* art form that allows you\nto create interactive art. You can create projects that other people can play\nwith, and you can talk to them indirectly. No other art form is quite this\ninteractive. Movies flow to the audience in one direction. Paintings do not\nmove. Code goes both ways.\n\nProgramming as a profession is only moderately interesting. It can be a good\njob, but you could make about the same money and be happier running a fast food joint. You're much better off using code as\nyour secret weapon in another profession.\n\nPeople who can code in the world of technology companies are a dime a dozen and\nget no respect. People who can code in biology, medicine, government,\nsociology, physics, history, and mathematics are respected and can do amazing\nthings to advance those disciplines.\n\nOf course, all of this advice is pointless. If you liked learning to write\nsoftware with this book, you should try to use it to improve your life any way\nyou can. Go out and explore this weird wonderful new intellectual pursuit that\nbarely anyone in the last 50 years has been able to explore. Might as well\nenjoy it while you can.\n\nFinally, I'll say that learning to create software changes you and makes you\ndifferent. Not better or worse, just different. You may find that people\ntreat you harshly because you can create software, maybe using words like\n\"nerd\". Maybe you'll find that because you can dissect their logic that they\nhate arguing with you. You may even find that simply knowing how a computer\nworks makes you annoying and weird to them.\n\nTo this I have just one piece of advice: they can go to hell. 
The world needs\nmore weird people who know how things work and who love to figure it all out.\nWhen they treat you like this, just remember that this is *your* journey, not\ntheirs. Being different is not a crime, and people who tell you it is are\njust jealous that you've picked up a skill they never in their wildest dreams\ncould acquire.\n\nYou can code. They cannot. That is pretty damn cool.\n\n\n" }, { "alpha_fraction": 0.6120648384094238, "alphanum_fraction": 0.6444921493530273, "avg_line_length": 36.088497161865234, "blob_id": "93e332c95cb8b6da56d2764034fc9e0be4706d49", "content_id": "13a289ade034fc2bf0c13551c508162b8508a893", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4194, "license_type": "no_license", "max_line_length": 94, "num_lines": 113, "path": "/ex28.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 28: Boolean Practice\n*****************************\n\nThe logic combinations you learned from the last exercise are called \"boolean\"\nlogic expressions. Boolean logic is used *everywhere* in programming. They\nare essential fundamental parts of computation and knowing them very well is\nakin to knowing your scales in music.\n\nIn this exercise you will be taking the logic exercises you memorized and start\ntrying them out in ``python``. Take each of these logic problems, and write\nout what you think the answer will be. In each case it will be either True or\nFalse. Once you have the answers written down, you will start ``python`` in\nyour terminal and type them in to confirm your answers.\n\n1. ``True and True``\n2. ``False and True``\n3. ``1 == 1 and 2 == 1``\n4. ``\"test\" == \"test\"``\n5. ``1 == 1 or 2 != 1``\n6. ``True and 1 == 1``\n7. ``False and 0 != 0``\n8. ``True or 1 == 1``\n9. ``\"test\" == \"testing\"``\n10. ``1 != 0 and 2 == 1``\n11. ``\"test\" != \"testing\"``\n12. ``\"test\" == 1``\n13. ``not (True and False)``\n14. 
``not (1 == 1 and 0 != 1)``\n15. ``not (10 == 1 or 1000 == 1000)``\n16. ``not (1 != 10 or 3 == 4)``\n17. ``not (\"testing\" == \"testing\" and \"Zed\" == \"Cool Guy\")``\n18. ``1 == 1 and not (\"testing\" == 1 or 1 == 0)``\n19. ``\"chunky\" == \"bacon\" and not (3 == 4 or 3 == 3)``\n20. ``3 == 3 and not (\"testing\" == \"testing\" or \"Python\" == \"Fun\")``\n\nI will also give you a trick to help you figure out the more complicated\nones toward the end.\n\nWhenever you see these boolean logic statements, you can solve them easily by\nthis simple process:\n\n1. Find equality test (== or !=) and replace it with its truth.\n2. Find each and/or inside a parenthesis and solve those first.\n3. Find each not and invert it.\n4. Find any remaining and/or and solve it.\n5. When you are done you should have True or False.\n\nI will demonstrate with a variation on #20:\n\n\n.. code-block:: python\n\n 3 != 4 and not (\"testing\" != \"test\" or \"Python\" == \"Python\")\n\nHere's me going through each of the steps and showing you the translation until\nI've boiled it down to a single result:\n\n1. Solve each equality test:\n a. ``3 != 4`` is ``True``: ``True and not (\"testing\" != \"test\" or \"Python\" == \"Python\")``\n b. ``\"testing\" != \"test\"`` is ``True``: ``True and not (True or \"Python\" == \"Python\")``\n c. ``\"Python\" == \"Python\"``: ``True and not (True or True)``\n2. Find each and/or in parenthesis ():\n a. ``(True or True)`` is True: ``True and not (True)``\n3. Find each not and invert it:\n a. ``not (True)`` is False: ``True and False``\n4. Find any remaining and/or and solve them:\n a. ``True and False`` is False\n\nWith that we're done and know the result is False.\n\n.. warning::\n\n The more complicated ones may seem *very* hard at first. You should be\n able to give a good first stab at solving them, but do not get discouraged.\n I'm just getting you primed for more of these \"logic gymnastics\" so that\n later cool stuff is much easier. 
Just stick with it, and keep track of\n what you get wrong, but do not worry that it's not getting in your head\n quite yet. It'll come.\n\n\n\nWhat You Should See\n===================\n\nAfter you have tried to guess at these, this is what your session with ``python``\nmight look like:\n\n\n.. code-block:: pycon\n\n $ python\n Python 2.5.1 (r251:54863, Feb 6 2009, 19:02:12) \n [GCC 4.0.1 (Apple Inc. build 5465)] on darwin\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> True and True\n True\n >>> 1 == 1 and 2 == 2\n True\n\n\nExtra Credit\n============\n\n1. There are a lot of operators in Python similar to ``!=`` and ``==``. Try to\n find out as many \"equality operators\" as you can. They should be like: ``<``\n or ``<=``.\n2. Write out the names of each of these equality operators. For example, I call\n ``!=`` \"not equal\".\n3. Play with the ``python`` by typing out new boolean operators, and before you\n hit enter try to shout out what it is. Do not think about it, just the\n first thing that comes to mind. Write it down then hit enter, and keep\n track of how many you get right and wrong.\n4. Throw away that piece of paper from #3 away so you do not accidentally try to use it later.\n\n\n\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.74210524559021, "avg_line_length": 45.050506591796875, "blob_id": "2023075ca80adb33c4ffd2b79a6e4d41b6782160", "content_id": "6687f762695db532886530a4ebeb24c4bb9a2e00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4560, "license_type": "no_license", "max_line_length": 92, "num_lines": 99, "path": "/ex47.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 47: Automated Testing\n******************************\n\nHaving to type commands into your game over and over to make sure\nit's working is annoying. 
Wouldn't it be better to write little pieces of code\nthat test your code? Then when you make a change, or add a new thing to your\nprogram, you just \"run your tests\" and the tests make sure things are still \nworking. These automated tests won't catch all your bugs, but they will \ncut down on the time you spend repeatedly typing and running your code.\n\nEvery exercise after this one will not have a ``What You Should See`` section,\nbut instead it will have a ``What You Should Test`` section. You will be writing\nautomated tests for all of your code starting now, and this will hopefully make you\nan even better programmer.\n\nI won't try to explain why you should write automated tests. I will only say that,\nyou are trying to be a programmer, and programmers automate boring and tedious \ntasks. Testing a piece of software is definitely boring and tedious, so you might\nas well write a little bit of code to do it for you.\n\nThat should be all the explanation you need because *your* reason for writing unit\ntests is to make your brain stronger. You have gone through this book writing code\nto do things. Now you are going to take the next leap and write code that\nknows about other code you have written. This process of writing a test that runs\nsome code you have written *forces* you to understand clearly what you have just written.\nIt solidifies in your brain exactly what it does and why it works and gives you a new\nlevel of attention to detail.\n\nWriting A Test Case\n===================\n\nWe're going to take a very simple piece of code and write one simple test. We're\ngoing to base this little test on a new project from your project skeleton.\n\nFirst, make a ``ex47`` project from your project skeleton. Make sure you\ndo it right and rename the module and get that first ``tests/ex47_tests.py`` test\nfile going right. Also make sure nose runs this test file. 
*IMPORTANT* make sure you\nalso delete ``tests/skel_tests.pyc`` if it's there.\n\nNext, create a simple file ``ex47/game.py`` where you can put the code to test.\nThis will be a very silly little class that we want to test with this code\nin it:\n\n.. literalinclude:: ex/ex47.py\n :linenos:\n\nOnce you have that file, change unit test skeleton to this:\n\n.. literalinclude:: ex/ex47_tests.py\n :linenos:\n\nThis file imports the ``Room`` class you made in the ``ex47.game``\nmodule so that you can do tests on it. There are then a set of tests that are functions\nstarting with ``test_``. Inside each test case there's a bit of code that makes\na Room or a set of Rooms, and then makes sure the rooms work the way you expect them\nto work. It tests out the basic room features, then the paths, then tries out a whole\nmap.\n\nThe important functions here are ``assert_equal`` which makes sure that variables\nyou have set or paths you have built in a ``Room`` are actually what you think they are.\nIf you get the wrong result, then ``nosetests`` will print out an error message\nso you can go figure it out.\n\n\nTesting Guidelines\n==================\n\nFollow these general loose set of guidelines when making your tests:\n\n1. Test files go in ``tests/`` and are named ``BLAH_tests.py`` otherwise ``nosetests``\n won't run them. This also keeps your tests from clashing with your other code.\n2. Write one test file for each module you make.\n3. Keep your test cases (functions) short, but do not worry if they are a bit\n messy. Test cases are usually kind of messy.\n4. Even though test cases are messy, try to keep them clean and remove any repetitive\n code you can. Create helper functions that get rid of duplicate code. You will thank\n me later when you make a change and then have to change your tests. Duplicated\n code will make changing your tests more difficult.\n5. Finally, do not get too attached to your tests. 
Sometimes, the best way to redesign\n something is to just delete it, the tests, and start over.\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex47.txt\n :language: console\n\nThat's what you should see if everything is working right. Try causing an error to see what\nthat looks like and then fix it.\n\nExtra Credit\n============\n\n\n1. Go read about nosetests more, and also read about alternatives.\n2. Learn about Python's \"doc tests\" and see if you like them better.\n3. Make your Room more advanced, and then use it to rebuild your game yet again\n but this time, unit test as you go.\n\n" }, { "alpha_fraction": 0.7206462025642395, "alphanum_fraction": 0.7263311743736267, "avg_line_length": 46.5, "blob_id": "a1c2e3843f359b9a52d59cc4c29b71fd146d2e4e", "content_id": "ffc21fbf8cb2686e06cf09505d9b21b225b9e2ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 17766, "license_type": "no_license", "max_line_length": 190, "num_lines": 374, "path": "/ex51.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 51: Getting Input From A Browser\n*****************************************\n\n\nWhile it's exciting to see the browser display \"Hello World\", it's even\nmore exciting to let the user submit text to your application from\na form. In this exercise we'll improve our starter web application\nusing forms and storing information about the user into their \"session\".\n\nHow The Web Works\n=================\n\nTime for some boring stuff. You need to understand a bit more about how the web\nworks before you can make a form. This description isn't complete, but it's\naccurate and will help you figure out what might be going wrong with your\napplication. Also, creating forms will be easier if you know\nwhat they do.\n\nI'll start with a simple diagram that shows you the different parts of a web\nrequest and how the information flows:\n\n.. 
figure:: _static/http_request_diagram.*\n :align: center\n \n\nI've labeled the lines with letters so I can walk you through a regular request process:\n\n1. You type in the url ``http://learnpythonthehardway.org/`` into your browser and it\n sends the request out on ``line (A)`` to your computer's network interface.\n2. Your request goes out over the internet on ``line (B)`` and then to the remote\n computer on ``line (C)`` where my server accepts the request.\n3. Once my computer accepts it, my web application gets it on ``line (D)``, and my\n Python code runs the ``index.GET`` handler.\n4. The response comes out of my Python server when I ``return`` it, and goes back\n to your browser over ``line (D)`` again.\n5. The server running this site takes the response off ``line (D)`` then sends it back\n over the internet on ``line (C)``.\n6. The response from the server then comes off the internet on ``line (B)``, and your\n computer's network interface hands it to your browser on ``line (A)``.\n7. Finally, your browser then displays the response.\n\nIn this description there are a few terms you should know so that you have a common\nvocabulary to work with when talking about your web application:\n\nBrowser\n The software that you're probably using every day. Most people don't\n know what it really does, they just call it \"the internet\". Its job is to\n take addresses (like http://learnpythonthehardway.org) you type into the URL\n bar, then use that information to make requests to the server at\n that address.\n\nAddress\n This is normally a URL (Uniform Resource Locator) like http://learnpythonthehardway.org/\n and indicates where a browser should go. The first part ``http`` indicates\n the protocol you want to use, in this case \"Hyper-Text Transport Protocol\". You can\n also try ftp://ibiblio.org/ to see how \"File Transport Protocol\" works. 
The ``learnpythonthehardway.org``\n part is the \"hostname\", or a human readable address you can remember and which maps\n to a number called an IP address, similar to a telephone number for a computer on the\n Internet. Finally, URLs can have a trailing ``path`` like the ``/book/`` part of\n http://learnpythonthehardway.org/book/ which indicates a file or some resource\n *on* the server to retrieve with a request. There are many other parts, but those are\n the main ones.\n\nConnection\n Once a browser knows what protocol you want to use (http), what server you want to \n talk to (learnpythonthehardway.org), and what resource on that server to get, it\n must make a connection. The browser simply asks your Operating System (OS) to open\n a \"port\" to the computer, usually port 80. When it works the OS hands\n back to your program something that works like a file, but is actually sending\n and receiving bytes over the network wires between your computer and the\n other computer at \"learnpythonthehardway.org\". This is also the same thing\n that happens with http://localhost:8080/ but in this case you're telling\n the browser to connect to your own computer (localhost) and use port 8080\n rather than the default of 80. You could also do http://learnpythonthehardway.org:80/\n and get the same result, except you're explicitly saying to use port 80 instead of\n letting it be that by default.\n\nRequest\n Your browser is connected using the address you gave. Now it needs to\n ask for the resource it wants (or you want) on the remote server. If you\n gave ``/book/`` at the end of the URL, then you want the file (resource)\n at /book/, and most servers will use the real file /book/index.html but\n pretend it doesn't exist. What the browser does to get this resource is\n send a *request* to the server. I won't\n get into exactly how it does this, but just understand that it has to \n send something to query the server for the request. 
The interesting thing is that\n these \"resources\" don't have to be files. For instance, when the browser in your \n application asks for something, the server is\n returning something your Python code generated.\n\nServer\n The server is the computer at the end of a browser's\n connection that knows how to answer your browser's requests for files/resources.\n Most web servers just send files, and that's actually the majority of traffic.\n But you're actually building a server in Python that knows how to take \n requests for resources, and then return strings that you craft using Python.\n When you do this crafting, *you* are pretending to be a file to the browser,\n but really it's just code. As you can see from Ex. 50, it also doesn't\n take much code to create a response.\n\nResponse\n This is the HTML (css, javascript, or images) your server wants to\n send back to the browser as the answer to the browser's request. In the case\n of files, it just reads them off the disk and sends them to the browser, but\n it wraps the contents of the disk in a special \"header\" so the browser knows\n what it's getting. In the case of your application, you're still sending \n the same thing, including the header, but you generate that data on the fly\n with your Python code.\n\nThat is the fastest crash course in how a web browser accesses information on servers\non the internet. It should work well enough for you to understand this exercise, but if not, read about it as much as you can until you get\nit. A really good way to do that is to take the diagram, and break different parts\nof the web application you did in Exercise 50. If you can break your web application\nin predictable ways using the diagram, you'll start to understand how it works.\n\n\nHow Forms Work\n==============\n\nThe best way to play with forms is to write some code that accepts\nform data, and then see what you can do. Take your ``bin/app.py``\nfile and make it look like this:\n\n.. 
literalinclude:: ex/ex51/gothonweb/form_test.py\n :linenos:\n\nRestart it (hit CTRL-c and then run it again) to make sure it loads\nagain, then with your browser go to ``http://localhost:8080/hello``\nwhich should display, \"I just wanted to say Hello, Nobody.\" Next, change\nthe URL in your browser to ``http://localhost:8080/hello?name=Frank`` and\nyou'll see it say \"Hello, Frank.\" Finally, change the ``name=Frank`` part\nto be your name. Now it's saying hello to you.\n\nLet's break down the changes I made to your script.\n\n1. Instead of just a string for ``greeting`` I'm now using ``web.input`` to\n get data from the browser. This function takes a key=value set of defaults,\n parses the ``?name=Frank`` part of the URL you give it, and then returns\n a nice object for you to work with that represents those values.\n2. I then construct the ``greeting`` from the new ``form.name`` attribute of\n the ``form`` object, which should be very familiar to you by now.\n3. Everything else about the file is the same as before.\n\nYou're also not restricted to just one parameter on the URL. Change this\nexample to give two variables like this:\n``http://localhost:8080/hello?name=Frank&greet=Hola``. Then change the code to\nget ``form.name`` and ``form.greet`` like this:\n\n.. code-block:: python\n\n greeting = \"%s, %s\" % (form.greet, form.name)\n\nAfter that, try the URL. Next, leave out the ``&greet=Hola``\npart so that you can see the error you get. Since ``greet`` doesn't have\na default value in ``web.input(name=\"Nobody\")`` then it is a required field.\nNow go back and make it have a default in the ``web.input`` call to see\nhow you fix this. Another thing you can do is set its default to ``greet=None``\nso that you can check if it exists and then give a better error message, like this:\n\n.. 
code-block:: python\n \n form = web.input(name=\"Nobody\", greet=None)\n\n if form.greet:\n greeting = \"%s, %s\" % (form.greet, form.name)\n return render.index(greeting = greeting)\n else:\n return \"ERROR: greet is required.\"\n\n\nCreating HTML Forms\n===================\n\nPassing the parameters on the URL works, but it's kind of ugly and \nnot easy to use for regular people. What you really want is a \"POST form\", which is a special HTML file that has a ``<form>``\ntag in it. This form will collect information from the user, then\nsend it to your web application just like you did above.\n\nLet's make a quick one so you can see how it works. Here's the new HTML\nfile you need to create, in ``templates/hello_form.html``:\n\n.. literalinclude:: ex/ex51/gothonweb/templates/hello_form.html\n :linenos:\n\nYou should then change ``bin/app.py`` to look like this:\n\n.. literalinclude:: ex/ex51/gothonweb/post_form.py\n :linenos:\n\nOnce you've got those written up, simply restart the web application\nagain and hit it with your browser like before.\n\nThis time you'll get a form asking you for \"A Greeting\" and \"Your Name\".\nWhen you hit the ``Submit`` button on the form, it will give you the\nsame greeting you normally get, but this time look at the URL in your\nbrowser. See how it's ``http://localhost:8080/hello`` even though\nyou sent in parameters.\n\nThe part of the ``hello_form.html`` file that makes this work is the\nline with ``<form action=\"/hello\" method=\"POST\">``. This tells your\nbrowser to:\n\n1. Collect data from the user using the form fields inside the form.\n2. Send them to the server using a ``POST`` type of request, which is\n just another browser request that \"hides\" the form fields.\n3. Send that to the ``/hello`` URL (as shown in the ``action=\"/hello\"`` part).\n\nYou can then see how the two ``<input>`` tags match the names of the variables\nin your new code. 
Also notice that instead\nof just a ``GET`` method inside ``class index``, I have another method ``POST``.\n\nHow this new application works is:\n\n1. The browser first hits the web application at ``/hello`` but it sends a \n ``GET``, so our ``index.GET`` function runs and returns the ``hello_form``.\n2. You fill out the form in the browser, and the browser does what the ``<form>``\n says and sends the data as a ``POST``.\n3. The web application then runs the ``index.POST`` method rather than the\n ``index.GET`` method to handle this request.\n4. This ``index.POST`` method then does what it normally does to send \n back the hello page like before. There's really nothing new in \n here, it's just moved into a new function.\n\nAs an exercise, go into the ``templates/index.html`` file and add a\nlink *back* to just ``/hello`` so that you can keep filling out the form\nand seeing the results. Make sure you can explain how this link\nworks and how it's letting you cycle between ``templates/index.html`` and\n``templates/hello_form.html`` and what's being run inside this latest\nPython code.\n\n\nCreating A Layout Template\n==========================\n\nWhen you work on your game in the next Exercise, you'll need to make a\nbunch of little HTML pages. Writing a full web page each time will quickly become tedious. Luckily you can create a \"layout\" template, or a kind of shell that will wrap all your other pages\nwith common headers and footers. Good programmers\ntry to reduce repetition, so layouts are essential for being a good programmer.\n\nChange ``templates/index.html`` to be like this:\n\n.. literalinclude:: ex/ex51/gothonweb/templates/index_laid_out.html\n :linenos:\n\nThen change ``templates/hello_form.html`` to be like this:\n\n.. literalinclude:: ex/ex51/gothonweb/templates/hello_form_laid_out.html\n :linenos:\n\nAll we're doing is stripping out the \"boilerplate\" at the top and the\nbottom which is always on every page. 
We'll put that back into a\nsingle ``templates/layout.html`` file that handles it for us from\nnow on.\n\nOnce you have those changes, create a ``templates/layout.html`` file\nwith this in it:\n\n.. literalinclude:: ex/ex51/gothonweb/templates/layout.html\n :linenos:\n\nThis file looks like a regular template, except that it's going to be\npassed the *contents* of the other templates and used to *wrap* them.\nAnything you put in here doesn't need to be in the other templates.\nYou should also pay attention to how ``$:content`` is written, since\nit's a little different from the other template variables.\n\nThe *final* step is to change the line that makes the ``render`` object to\nbe this:\n\n.. code-block:: python\n\n render = web.template.render('templates/', base=\"layout\")\n\nWhich tells ``lpthw.web`` to use the ``templates/layout.html`` file as the\n*base* template for all the other templates. Restart your application and\nthen try to change the layout in interesting ways, but without changing the\nother templates.\n\n\nWriting Automated Tests For Forms\n=================================\n\nIt's easy to test a web application with your browser by just hitting refresh,\nbut come on, we're programmers here. Why do some repetitive task when we can write some code to test our application? What you're going to do next is\nwrite a little test for your web application form based on what\nyou learned in Exercise 47. If you don't remember Exercise 47, read it again.\n\nYou need to do a bit of setup to make Python let you load your ``bin/app.py``\nfile for testing. When we get to Exercise 52 you'll change this, but for now\ncreate an empty ``bin/__init__.py`` file so Python thinks ``bin/`` is a directory.\n\nI've also created a simple little function for ``lpthw.web`` that lets you\nassert things about your web application's response, aptly named ``assert_response``.\nCreate the file ``tests/tools.py`` with these contents:\n\n.. 
literalinclude:: ex/ex51/gothonweb/tests/tools.py\n :linenos:\n\nOnce that's in place you can write your automated test for the last version of the\n``bin/app.py`` file you created. Create a new file named ``tests/app_tests.py``\nwith this:\n\n.. literalinclude:: ex/ex51/gothonweb/tests/app_tests.py\n :linenos:\n\nFinally, use nosetests to run this test setup and test your web application:\n\n.. code-block:: console\n\n $ nosetests\n .\n ----------------------------------------------------------------------\n Ran 1 test in 0.059s\n\n OK\n\nWhat I'm doing here is I'm actually *importing* the whole application from the\n``bin/app.py`` module, then running it manually. The ``lpthw.web`` framework\nhas a very simple API for processing requests, which looks like this:\n\n.. code-block:: python\n\n app.request(localpart='/', method='GET', data=None, host='0.0.0.0:8080',\n headers=None, https=False)\n\nThis means you can pass in the URL as the first parameter, then change the \nmethod of the request, as wellas what form data you send, including the host and headers.\nThis works without running an actual web server so you can do tests with automated tests\nand also use your browser to test a running server.\n\nTo validate responses from this function, use the ``assert_response`` function\nfrom ``tests.tools`` which has:\n\n\n.. code-block:: python\n\n assert_response(resp, contains=None, matches=None, headers=None, status=\"200\")\n\nPass in the response you get from calling ``app.request`` then \nadd things you want checked. Use the ``contains`` parameter to make \nsure that the response contains certain values. Use the ``status``\nparameter to check for certain responses. There's actually quite a lot of\ninformation in this little function so it would be good for you to\nstudy it.\n\nIn the ``tests/app_tests.py`` automated test I'm first making sure the ``/`` URL\nreturns a \"404 Not Found\" response, since it actually doesn't exist. 
Then I'm\nchecking that ``/hello`` works with both a ``GET`` and ``POST`` form. Following\nthe test should be fairly simple, even if you might not totally know what's going on.\n\nTake some time studying this latest application, especially how the automated testing works. Make sure you understand how I \nimported the application from ``bin/app.py`` and ran it directly for the automated\ntest. This is an important trick that will lead to \nmore learning.\n\n\nExtra Credit\n============\n\n1. Read even more about HTML, and give the simple form a better layout.\n It helps to draw what you want to do on paper and *then*\n implement it with HTML.\n2. This one is hard, but try to figure out how you'd do a file upload form so that you can\n upload an image and save it to the disk.\n3. This is even more mind-numbing, but go find the HTTP RFC (which is the document that\n describes how HTTP works) and read as much of it as you can. It is really boring, \n but comes in handy once in a while.\n4. This will also be really difficult, but see if you can find someone to help you setup\n a web server like Apache, Nginx, or thttpd. Try to serve a couple of your .html\n and .css files with it just to see if you can. Don't worry if you can't, web servers\n kind of suck.\n5. Take a break after this and just try making as many different web applications as\n you can. 
You should *definitely* read about sessions in ``web.py`` (which is\n the same as ``lpthw.web``) so you can understand how to keep state for a user.\n\n" }, { "alpha_fraction": 0.6917749047279358, "alphanum_fraction": 0.7021645307540894, "avg_line_length": 22.571428298950195, "blob_id": "53bbed7f68020f07c8a480757f3d9e4a86505296", "content_id": "5bedda22570cf24aeae4f097aba036876d8a3e52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 58, "num_lines": 49, "path": "/cn/ex17.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 17: ๆ›ดๅคšๆ–‡ไปถๆ“ไฝœ\n***********************\n\n็Žฐๅœจ่ฎฉๆˆ‘ไปฌๅ†ๅญฆไน ๅ‡ ็งๆ–‡ไปถๆ“ไฝœใ€‚ๆˆ‘ไปฌๅฐ†็ผ–ๅ†™ไธ€ไธช Python ่„šๆœฌ๏ผŒๅฐ†ไธ€ไธชๆ–‡ไปถไธญ็š„ๅ†…ๅฎน\\\nๆ‹ท่ดๅˆฐๅฆๅค–ไธ€ไธชๆ–‡ไปถไธญใ€‚่ฟ™ไธช่„šๆœฌๅพˆ็Ÿญ๏ผŒไธ่ฟ‡ๅฎƒไผš่ฎฉไฝ ๅฏนไบŽๆ–‡ไปถๆ“ไฝœๆœ‰ๆ›ดๅคš็š„ไบ†่งฃใ€‚\n\n.. literalinclude:: ex/ex17.py\n :linenos:\n\nไฝ ๅบ”่ฏฅๅพˆๅฟซๆณจๆ„ๅˆฐไบ†ๆˆ‘ไปฌ ``import`` ไบ†ๅˆไธ€ไธชๅพˆๅฅฝ็”จ็š„ๅ‘ฝไปค ``exists``\\ใ€‚่ฟ™ไธชๅ‘ฝไปค\\\nๅฐ†ๆ–‡ไปถๅๅญ—็ฌฆไธฒไฝœไธบๅ‚ๆ•ฐ๏ผŒๅฆ‚ๆžœๆ–‡ไปถๅญ˜ๅœจ็š„่ฏ๏ผŒๅฎƒๅฐ†่ฟ”ๅ›ž ``True``\\๏ผŒๅฆๅˆ™ๅฐ†่ฟ”ๅ›ž ``False``\\ใ€‚\\\nๅœจๆœฌไนฆ็š„ไธ‹ๅŠ้ƒจๅˆ†๏ผŒๆˆ‘ไปฌๅฐ†ไฝฟ็”จ่ฟ™ไธชๅ‡ฝๆ•ฐๅšๅพˆๅคš็š„ไบ‹ๆƒ…๏ผŒไธ่ฟ‡็Žฐๅœจไฝ ๅบ”่ฏฅๅญฆไผš\\\nๆ€Žๆ ท้€š่ฟ‡ ``import`` ่ฐƒ็”จๅฎƒใ€‚\n\n้€š่ฟ‡ไฝฟ็”จ ``import`` ๏ผŒไฝ ๅฏไปฅๅœจ่‡ชๅทฑไปฃ็ ไธญ็›ดๆŽฅไฝฟ็”จๅ…ถไป–ๆ›ดๅމๅฎณ็š„๏ผˆ้€šๅธธๆ˜ฏ่ฟ™ๆ ท๏ผŒไธ่ฟ‡ไนŸไธ\nๅฐฝ็„ถ๏ผ‰็จ‹ๅบๅ‘˜ๅ†™็š„ๅคง้‡ๅ…่ดนไปฃ็ ๏ผŒ่ฟ™ๆ ทไฝ ๅฐฑไธ้œ€่ฆ้‡ๅ†™ไธ€้ไบ†ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๅ’Œไฝ ๅ‰้ขๅ†™็š„่„šๆœฌไธ€ๆ ท๏ผŒ่ฟ่กŒ่ฏฅ่„šๆœฌ้œ€่ฆไธคไธชๅ‚ๆ•ฐ๏ผŒไธ€ไธชๆ˜ฏๅพ…ๆ‹ท่ด็š„ๆ–‡ไปถ๏ผŒไธ€ไธชๆ˜ฏ่ฆๆ‹ท่ด่‡ณ็š„\\\nๆ–‡ไปถใ€‚ๅฆ‚ๆžœๆˆ‘ไปฌไฝฟ็”จไปฅๅ‰็š„ ``test.txt`` ๆˆ‘ไปฌๅฐ†็œ‹ๅˆฐๅฆ‚ไธ‹็š„็ป“ๆžœ:\n\n.. 
literalinclude:: ex/ex17.txt\n\n่ฏฅๅ‘ฝไปคๅฏนไบŽไปปไฝ•ๆ–‡ไปถ้ƒฝๅบ”่ฏฅๆ˜ฏๆœ‰ๆ•ˆ็š„ใ€‚่ฏ•่ฏ•ๆ“ไฝœไธ€ไบ›ๅˆซ็š„ๆ–‡ไปถ็œ‹็œ‹็ป“ๆžœใ€‚ไธ่ฟ‡ๅฐๅฟƒๅˆซๆŠŠไฝ ็š„้‡่ฆ\\\nๆ–‡ไปถ็ป™ๅผ„ๅไบ†ใ€‚\n\n.. warning::\n\n ไฝ ็œ‹ๅˆฐๆˆ‘็”จ ``cat`` ่ฟ™ไธชๅ‘ฝไปคไบ†ๅง๏ผŸๅฎƒๅช่ƒฝๅœจ Linux ๅ’Œ OSX ไธ‹้ขไฝฟ็”จ๏ผŒไฝฟ็”จ Windows \n ็š„ๅฐฑๅชๅฅฝ่ทŸไฝ ่ฏดๅฃฐๆŠฑๆญ‰ไบ†ใ€‚\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅ†ๅคš่ฏป่ฏปๅ’Œ ``import`` ็›ธๅ…ณ็š„ๆๆ–™๏ผŒๅฐ† ``python`` ่ฟ่กŒ่ตทๆฅ๏ผŒ่ฏ•่ฏ•่ฟ™ไธ€ๆกๅ‘ฝไปคใ€‚่ฏ•็€็œ‹็œ‹\\\n ่‡ชๅทฑ่ƒฝไธ่ƒฝๆ‘ธๅ‡บ็‚น้—จ้“๏ผŒๅฝ“็„ถไบ†๏ผŒๅณไฝฟๅผ„ไธๆ˜Ž็™ฝไนŸๆฒกๅ…ณ็ณปใ€‚\n2. ่ฟ™ไธช่„šๆœฌ *ๅฎžๅœจๆ˜ฏ* ๆœ‰็‚น็ƒฆไบบใ€‚ๆฒกๅฟ…่ฆๅœจๆ‹ท่ดไน‹ๅ‰้—ฎไธ€้ๆŠŠ๏ผŒๆฒกๅฟ…่ฆๅœจๅฑๅน•ไธŠ่พ“ๅ‡บ้‚ฃไนˆๅคšไธœ่ฅฟใ€‚\\\n ่ฏ•็€ๅˆ ๆމ่„šๆœฌ็š„ไธ€ไบ›ๅŠŸ่ƒฝ๏ผŒ่ฎฉๅฎƒไฝฟ็”จ่ตทๆฅๆ›ดๅŠ ๅ‹ๅฅฝใ€‚\n3. ็œ‹็œ‹ไฝ ่ƒฝๆŠŠ่ฟ™ไธช่„šๆœฌๆ”นๅคš็Ÿญ๏ผŒๆˆ‘ๅฏไปฅๆŠŠๅฎƒๅ†™ๆˆไธ€่กŒใ€‚\n4. ๆˆ‘ไฝฟ็”จไบ†ไธ€ไธชๅซ `cat` ็š„ไธœ่ฅฟ๏ผŒ่ฟ™ไธชๅค่€็š„ๅ‘ฝไปค็š„็”จๅค„ๆ˜ฏๅฐ†ไธคไธชๆ–‡ไปถโ€œ่ฟžๆŽฅ(con*cat*enate)โ€\\\n ๅˆฐไธ€่ตท๏ผŒไธ่ฟ‡ๅฎž้™…ไธŠๅฎƒๆœ€ๅคง็š„็”จ้€”ๆ˜ฏๆ‰“ๅฐๆ–‡ไปถๅ†…ๅฎนๅˆฐๅฑๅน•ไธŠใ€‚ไฝ ๅฏไปฅ้€š่ฟ‡ ``man cat`` \n ๅ‘ฝไปคไบ†่งฃๅˆฐๆ›ดๅคšไฟกๆฏใ€‚\n5. ไฝฟ็”จ Windows ็š„ๅŒๅญฆ๏ผŒไฝ ไปฌๅฏไปฅ็ป™่‡ชๅทฑๆ‰พไธ€ไธช ``cat`` ็š„ๆ›ฟไปฃๅ“ใ€‚ๅ…ณไบŽ ``man`` ็š„ไธœ่ฅฟๅฐฑ\\\n ๅˆซๆƒณๅคชๅคšไบ†๏ผŒWindows ไธ‹ๆฒก่ฟ™ไธชๅ‘ฝไปคใ€‚\n6. 
ๆ‰พๅ‡บไธบไป€ไนˆไฝ ้œ€่ฆๅœจไปฃ็ ไธญๅ†™ ``output.close()`` ใ€‚\n" }, { "alpha_fraction": 0.6688675284385681, "alphanum_fraction": 0.6740696430206299, "avg_line_length": 34.68571472167969, "blob_id": "a9fee30e4a027a59bb618c026d33911ed99de50f", "content_id": "c137279391c4271de85bda006d633f5471112b3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4998, "license_type": "no_license", "max_line_length": 94, "num_lines": 140, "path": "/ex49.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 49: Making Sentences\n*****************************\n\nWhat we should be able to get from our little game lexicon scanner is a list that\nlooks like this:\n\n.. code-block:: pycon\n\n >>> from ex48 import lexicon\n >>> print lexicon.scan(\"go north\")\n [('verb', 'go'), ('direction', 'north')]\n >>> print lexicon.scan(\"kill the princess\")\n [('verb', 'kill'), ('stop', 'the'), ('noun', 'princess')]\n >>> print lexicon.scan(\"eat the bear\")\n [('verb', 'eat'), ('stop', 'the'), ('noun', 'bear')]\n >>> print lexicon.scan(\"open the door and smack the bear in the nose\")\n [('error', 'open'), ('stop', 'the'), ('noun', 'door'), ('error', 'and'),\n ('error', 'smack'), ('stop', 'the'), ('noun', 'bear'), ('stop', 'in'),\n ('stop', 'the'), ('error', 'nose')]\n >>> \n\nNow let us turn this into something the game can work with, which\nwould be some kind of Sentence class.\n\nIf you remember grade school, a sentence can be a simple structure like:\n\n Subject Verb Object\n\nObviously it gets more complex than that, and you probably did many days of annoying\nsentence graphs for English class. What we want is to turn the above lists of\ntuples into a nice Sentence object that has subject, verb, and object.\n\nMatch And Peek\n==============\n\nTo do this we need four tools:\n\n1. A way to loop through the list of tuples. That's easy.\n2. 
A way to \"match\" different types of tuples that we expect in our Subject Verb Object setup.\n3. A way to \"peek\" at a potential tuple so we can make some decisions.\n4. A way to \"skip\" things we do not care about, like stop words.\n\nWe use the peek function to say look at the next element in our tuple list, and then\nmatch to take one off and work with it. Let's take a look at a first peek function:\n\n.. code-block:: python\n\n def peek(word_list):\n if word_list:\n word = word_list[0]\n return word[0]\n else:\n return None\n \nVery easy. Now for the match function:\n\n.. code-block:: python\n\n def match(word_list, expecting):\n if word_list:\n word = word_list.pop(0)\n\n if word[0] == expecting:\n return word\n else:\n return None\n else:\n return None\n\nAgain, very easy, and finally our skip function:\n\n.. code-block:: python\n\n def skip(word_list, word_type):\n while peek(word_list) == word_type:\n match(word_list, word_type)\n\nBy now you should be able to figure out what these do. Make sure you understand them.\n\n\nThe Sentence Grammar\n====================\n\nWith our tools we can now begin to build Sentence objects from our list of tuples.\nWhat we do is a process of:\n\n1. Identify the next word with ``peek``.\n2. If that word fits in our grammar, we call a function to handle that part \n of the grammar, say ``parse_subject``.\n3. If it doesn't, we ``raise`` an error, which you will learn about in this lesson.\n4. When we're all done, we should have a Sentence object to work with in our game.\n\nThe best way to demonstrate this is to give you the code to read, but here's where\nthis exercise is different from the previous one: You will write the test for the\nparser code I give you. Rather than giving you the test so you can write the code,\nI will give you the code, and you have to write the test.\n\nHere's the code that I wrote for parsing simple sentences using the ``ex48.lexicon``\nmodule:\n\n.. 
literalinclude:: ex/ex49.py\n\nA Word On Exceptions\n====================\n\nYou briefly learned about exceptions, but not how to raise them. This code\ndemonstrates how to do that with the ``ParserError`` at the top. Notice\nthat it uses classes to give it the type of ``Exception``. Also notice the use\nof ``raise`` keyword to raise the exception.\n\nIn your tests, you will want to work with these exceptions, which I'll show you how \nto do.\n\n\nWhat You Should Test\n====================\n\nFor Exercise 49 is write a complete test that confirms everything\nin this code is working. That includes making exceptions happen by giving it\nbad sentences.\n\nCheck for an exception by using the function ``assert_raises`` from the nose\ndocumentation. Learn how to use this so you can write a test that is\n*expected* to fail, which is very important in testing. Learn about this\nfunction (and others) by reading the nose documentation.\n\nWhen you are done, you should know how this bit of code works, and how\nto write a test for other people's code even if they do not want you to. Trust me,\nit's a very handy skill to have.\n\nExtra Credit\n============\n\n1. Change the ``parse_`` methods and try to put them into a class rather than \n be just methods. Which design do you like better?\n2. Make the parser more error resistant so that you can avoid annoying your\n users if they type words your lexicon doesn't understand.\n3. Improve the grammar by handling more things like numbers.\n4. 
Think about how you might use this Sentence class in your game to do more fun\n things with a user's input.\n\n\n" }, { "alpha_fraction": 0.6631578803062439, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 15.928571701049805, "blob_id": "fc6c36171dc18aefc17630423390ae970a672e2b", "content_id": "649544a8e2789947b385640e3d3ee68f4f72e690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 46, "num_lines": 28, "path": "/cn/ex7.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 7: ๆ›ดๅคšๆ‰“ๅฐ\n*************************\n\n็Žฐๅœจๆˆ‘ไปฌๅฐ†ๅšไธ€ๆ‰น็ปƒไน ๏ผŒๅœจ็ปƒไน ็š„่ฟ‡็จ‹ไธญไฝ ้œ€่ฆ้”ฎๅ…ฅไปฃ็ ๏ผŒๅนถไธ”่ฎฉๅฎƒไปฌ่ฟ่กŒ่ตทๆฅใ€‚ๆˆ‘ไธไผš\\\n่งฃ้‡Šๅคชๅคš๏ผŒๅ› ไธบ่ฟ™่Š‚็š„ๅ†…ๅฎน้ƒฝๆ˜ฏไปฅๅ‰็†Ÿๆ‚‰่ฟ‡็š„ใ€‚่ฟ™่Š‚็ปƒไน ็š„็›ฎ็š„ๆ˜ฏๅทฉๅ›บไฝ ๅญฆๅˆฐ็š„ไธœ่ฅฟใ€‚ๆˆ‘ไปฌ\\\nๅ‡ ไธช็ปƒไน ๅŽๅ†่งใ€‚ไธ่ฆ่ทณ่ฟ‡่ฟ™ไบ›ไน ้ข˜ใ€‚ไธ่ฆๅคๅˆถ็ฒ˜่ดด๏ผ\n\n.. literalinclude:: ex/ex7.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex7.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\nๆŽฅไธ‹ๆฅๅ‡ ่Š‚็š„ๅŠ ๅˆ†ไน ้ข˜ๆ˜ฏไธ€ๆ ท็š„ใ€‚\n\n1. ้€†ๅ‘้˜…่ฏป๏ผŒๅœจๆฏไธ€่กŒ็š„ไธŠ้ขๅŠ ไธ€่กŒๆณจ่งฃใ€‚\n2. ๅ€’็€ๆœ—่ฏปๅ‡บๆฅ๏ผŒๆ‰พๅ‡บ่‡ชๅทฑ็š„้”™่ฏฏใ€‚\n3. ไปŽ็Žฐๅœจๅผ€ๅง‹๏ผŒๆŠŠไฝ ็š„้”™่ฏฏ่ฎฐๅฝ•ไธ‹ๆฅ๏ผŒๅ†™ๅœจไธ€ๅผ ็บธไธŠใ€‚\n4. ๅœจๅผ€ๅง‹ไธ‹ไธ€่Š‚ไน ้ข˜ๆ—ถ๏ผŒ้˜…่ฏปไธ€้ไฝ ่ฎฐๅฝ•ไธ‹ๆฅ็š„้”™่ฏฏ๏ผŒๅนถไธ”ๅฐฝ้‡้ฟๅ…ๅœจไธ‹ไธช็ปƒไน ไธญๅ†็ŠฏๅŒๆ ท็š„้”™่ฏฏใ€‚\n5. 
่ฎฐไฝ๏ผŒๆฏไธชไบบ้ƒฝไผš็Šฏ้”™่ฏฏใ€‚็จ‹ๅบๅ‘˜ๅ’Œ้ญ”ๆœฏๅธˆไธ€ๆ ท๏ผŒไป–ไปฌๅธŒๆœ›ๅคงๅฎถ่ฎคไธบไป–ไปฌไปŽไธ็Šฏ้”™๏ผŒไธ\\\n ่ฟ‡่ฟ™ๅชๆ˜ฏ่กจ่ฑก่€Œๅทฒ๏ผŒไป–ไปฌๆฏๆ—ถๆฏๅˆป้ƒฝๅœจ็Šฏ้”™ใ€‚\n\n" }, { "alpha_fraction": 0.6901554465293884, "alphanum_fraction": 0.6974093317985535, "avg_line_length": 21.952381134033203, "blob_id": "dfa475d4285a3669d9317e9a5e111b2d377a07e8", "content_id": "56006cced43ba68a4c96452e800edc9b4c706ae2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2137, "license_type": "no_license", "max_line_length": 56, "num_lines": 42, "path": "/cn/ex6.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 6: ๅญ—็ฌฆไธฒ(string)ๅ’Œๆ–‡ๆœฌ\n****************************\n\n่™ฝ็„ถไฝ ๅทฒ็ปๅœจ็จ‹ๅบไธญๅ†™่ฟ‡ๅญ—็ฌฆไธฒไบ†๏ผŒไฝ ่ฟ˜ๆฒกๅญฆ่ฟ‡ๅฎƒไปฌ็š„็”จๅค„ใ€‚ๅœจ่ฟ™็ซ ไน ้ข˜ไธญๆˆ‘ไปฌๅฐ†ไฝฟ็”จๅคๆ‚\\\n็š„ๅญ—็ฌฆไธฒๆฅๅปบ็ซ‹ไธ€็ณปๅˆ—็š„ๅ˜้‡๏ผŒไปŽไธญไฝ ๅฐ†ๅญฆๅˆฐๅฎƒไปฌ็š„็”จ้€”ใ€‚้ฆ–ๅ…ˆๆˆ‘ไปฌ่งฃ้‡Šไธ€ไธ‹ๅญ—็ฌฆไธฒๆ˜ฏไป€ไนˆ\nไธœ่ฅฟใ€‚\n\nๅญ—็ฌฆไธฒ้€šๅธธๆ˜ฏๆŒ‡ไฝ ๆƒณ่ฆๅฑ•็คบ็ป™ๅˆซไบบ็š„ใ€ๆˆ–่€…ๆ˜ฏไฝ ๆƒณ่ฆไปŽ็จ‹ๅบ้‡Œโ€œๅฏผๅ‡บโ€็š„ไธ€ๅฐๆฎตๅญ—็ฌฆใ€‚Python\nๅฏไปฅ้€š่ฟ‡ๆ–‡ๆœฌ้‡Œ็š„ๅŒๅผ•ๅท ``\"`` ๆˆ–่€…ๅ•ๅผ•ๅท ``'`` ่ฏ†ๅˆซๅ‡บๅญ—็ฌฆไธฒๆฅใ€‚่ฟ™ๅœจไฝ ไปฅๅ‰็š„ ``print``\n็ปƒไน ไธญไฝ ๅทฒ็ป่ง่ฟ‡ๅพˆๅคšๆฌกไบ†ใ€‚ๅฆ‚ๆžœไฝ ๆŠŠๅ•ๅผ•ๅทๆˆ–่€…ๅŒๅผ•ๅทๆ‹ฌ่ตทๆฅ็š„ๆ–‡ๆœฌๆ”พๅˆฐ ``print`` ๅŽ้ข๏ผŒ\\ \nๅฎƒไปฌๅฐฑไผš่ขซ python ๆ‰“ๅฐๅ‡บๆฅใ€‚\n\nๅญ—็ฌฆไธฒๅฏไปฅๅŒ…ๅซๆ ผๅผๅŒ–ๅญ—็ฌฆ ``%s``\\๏ผŒ่ฟ™ไธชไฝ ไน‹ๅ‰ไนŸ่ง่ฟ‡็š„ใ€‚ไฝ ๅช่ฆๅฐ†ๆ ผๅผๅŒ–็š„ๅ˜้‡ๆ”พๅˆฐๅญ—็ฌฆไธฒ\\\nไธญ๏ผŒๅ†็ดง่ทŸ็€ไธ€ไธช็™พๅˆ†ๅท ``%`` (percent)๏ผŒๅ†็ดง่ทŸ็€ๅ˜้‡ๅๅณๅฏใ€‚ๅ”ฏไธ€่ฆๆณจๆ„็š„ๅœฐๆ–น๏ผŒๆ˜ฏๅฆ‚ๆžœไฝ ๆƒณ่ฆ\\\nๅœจๅญ—็ฌฆไธฒไธญ้€š่ฟ‡ๆ ผๅผๅŒ–ๅญ—็ฌฆๆ”พๅ…ฅๅคšไธชๅ˜้‡็š„ๆ—ถๅ€™๏ผŒไฝ ้œ€่ฆๅฐ†ๅ˜้‡ๆ”พๅˆฐ ``( )`` ๅœ†ๆ‹ฌๅท(parenthesis)\\\nไธญ๏ผŒ่€Œไธ”ๅ˜้‡ไน‹้—ด็”จ ``,`` ้€—ๅท(comma)้š”ๅผ€ใ€‚ๅฐฑๅƒไฝ ้€›ๅ•†ๅบ—่ฏดโ€œๆˆ‘่ฆไนฐ็‰›ๅฅถใ€้ขๅŒ…ใ€้ธก่›‹ใ€ๅ…ซๅฎ็ฒฅโ€ไธ€ๆ ท๏ผŒ\\\nๅชไธ่ฟ‡็จ‹ๅบๅ‘˜่ฏด็š„ๆ˜ฏ\"(milk, eggs, 
bread, soup)\"ใ€‚\n\nๆˆ‘ไปฌๅฐ†้”ฎๅ…ฅๅคง้‡็š„ๅญ—็ฌฆไธฒใ€ๅ˜้‡ใ€ๅ’Œๆ ผๅผๅŒ–ๅญ—็ฌฆ๏ผŒๅนถไธ”ๅฐ†ๅฎƒไปฌๆ‰“ๅฐๅ‡บๆฅใ€‚ๆˆ‘ไปฌ่ฟ˜ๅฐ†็ปƒไน ไฝฟ็”จ็ฎ€ๅ†™\\\n็š„ๅ˜้‡ๅใ€‚็จ‹ๅบๅ‘˜ๅ–œๆฌขไฝฟ็”จๆผไบบ็š„้šพๅบฆ็š„็ฎ€ๅ†™ๆฅ่Š‚็บฆๆ‰“ๅญ—ๆ—ถ้—ด๏ผŒๆ‰€ไปฅๆˆ‘ไปฌ็Žฐๅœจๅฐฑๆๆ—ฉๅญฆไผš่ฟ™ไธช๏ผŒ่ฟ™ๆ ทไฝ \\\nๅฐฑ่ƒฝ่ฏปๆ‡‚ๅนถไธ”ๅ†™ๅ‡บ่ฟ™ไบ›ไธœ่ฅฟไบ†ใ€‚\n\n\n.. literalinclude:: ex/ex6.py\n :linenos:\n\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex6.txt\n :linenos:\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ้€š่ฏป็จ‹ๅบ๏ผŒๅœจๆฏไธ€่กŒ็š„ไธŠ้ขๅ†™ไธ€่กŒๆณจ่งฃ๏ผŒ็ป™่‡ชๅทฑ่งฃ้‡Šไธ€ไธ‹่ฟ™ไธ€่กŒ็š„ไฝœ็”จใ€‚\n2. ๆ‰พๅˆฐๆ‰€ๆœ‰็š„\"ๅญ—็ฌฆไธฒๅŒ…ๅซๅญ—็ฌฆไธฒ\"็š„ไฝ็ฝฎ๏ผŒๆ€ปๅ…ฑๆœ‰ๅ››ไธชไฝ็ฝฎใ€‚\n3. ไฝ ็กฎๅฎšๅชๆœ‰ๅ››ไธชไฝ็ฝฎๅ—๏ผŸไฝ ๆ€Žไนˆ็Ÿฅ้“็š„๏ผŸๆฒกๅ‡†ๆˆ‘ๅœจ้ช—ไฝ ๅ‘ขใ€‚\n4. ่งฃ้‡Šไธ€ไธ‹ไธบไป€ไนˆ ``w`` ๅ’Œ ``e`` ็”จ ``+`` ่ฟž่ตทๆฅๅฐฑๅฏไปฅ็”Ÿๆˆไธ€ไธชๆ›ด้•ฟ็š„ๅญ—็ฌฆไธฒใ€‚\n\n" }, { "alpha_fraction": 0.5929487347602844, "alphanum_fraction": 0.6105769276618958, "avg_line_length": 19.766666412353516, "blob_id": "6754b2439736553ae69c3b6c419d4aeaedc41cc2", "content_id": "ecbc2f4a892df161cd7e661692fde42b5c20e47e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/cn/ex20.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 20: ๅ‡ฝๆ•ฐๅ’Œๆ–‡ไปถ\n********************************\n\nๅ›žๅฟ†ไธ€ไธ‹ๅ‡ฝๆ•ฐ็š„่ฆ็‚น๏ผŒ็„ถๅŽไธ€่พนๅš่ฟ™่Š‚็ปƒไน ๏ผŒไธ€่พนๆณจๆ„ไธ€ไธ‹ๅ‡ฝๆ•ฐๅ’Œๆ–‡ไปถๆ˜ฏๅฆ‚ไฝ•\\\nๅœจไธ€่ตทๅไฝœๅ‘ๆŒฅไฝœ็”จ็š„ใ€‚\n\n.. literalinclude:: ex/ex20.py\n :linenos:\n\n็‰นๅˆซๆณจๆ„ไธ€ไธ‹๏ผŒๆฏๆฌก่ฟ่กŒ ``print_a_line`` ๆ—ถ๏ผŒๆˆ‘ไปฌๆ˜ฏๆ€Žๆ ทไผ ้€’ๅฝ“ๅ‰็š„่กŒๅท\\\nไฟกๆฏ็š„ใ€‚\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex20.txt\n \n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ้€š่ฏป่„šๆœฌ๏ผŒๅœจๆฏ่กŒไน‹ๅ‰ๅŠ ไธŠๆณจ่งฃ๏ผŒไปฅ็†่งฃ่„šๆœฌ้‡Œๅ‘็”Ÿ็š„ไบ‹ๆƒ…ใ€‚\n2. 
ๆฏๆฌก ``print_a_line`` ่ฟ่กŒๆ—ถ๏ผŒไฝ ้ƒฝไผ ้€’ไบ†ไธ€ไธชๅซ ``current_line`` ็š„ๅ˜้‡ใ€‚\\\n ๅœจๆฏๆฌก่ฐƒ็”จๅ‡ฝๆ•ฐๆ—ถ๏ผŒๆ‰“ๅฐๅ‡บ ``current_line`` ็š„่‡ณ๏ผŒ่ทŸ่ธชไธ€ไธ‹ๅฎƒๅœจ\n ``print_a_line`` ไธญๆ˜ฏๆ€Žๆ ทๅ˜ๆˆ ``line_count`` ็š„ใ€‚\n3. ๆ‰พๅ‡บ่„šๆœฌไธญๆฏไธ€ไธช็”จๅˆฐๅ‡ฝๆ•ฐ็š„ๅœฐๆ–นใ€‚ๆฃ€ๆŸฅ ``def`` ไธ€่กŒ๏ผŒ็กฎ่ฎคๅ‚ๆ•ฐๆฒกๆœ‰็”จ้”™ใ€‚\n4. ไธŠ็ฝ‘็ ”็ฉถไธ€ไธ‹ ``file`` ไธญ็š„ ``seek`` ๅ‡ฝๆ•ฐๆ˜ฏๅšไป€ไนˆ็”จ็š„ใ€‚่ฏ•็€่ฟ่กŒ\n ``pydoc file`` ็œ‹็œ‹่ƒฝไธ่ƒฝๅญฆๅˆฐๆ›ดๅคšใ€‚\n5. ็ ”็ฉถไธ€ไธ‹ ``+=`` ่ฟ™ไธช็ฎ€ๅ†™ๆ“ไฝœ็ฌฆ็š„ไฝœ็”จ๏ผŒๅ†™ไธ€ไธช่„šๆœฌ๏ผŒๆŠŠ่ฟ™ไธชๆ“ไฝœ็ฌฆ็”จๅœจ้‡Œ่พน่ฏ•ไธ€ไธ‹ใ€‚\n\n" }, { "alpha_fraction": 0.6895043849945068, "alphanum_fraction": 0.6997084617614746, "avg_line_length": 18.571428298950195, "blob_id": "41a14d0f368f246db728dba813f239812fc6f06a", "content_id": "b9dd26cf1b512f118848e1ef0acedf000805fd95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 59, "num_lines": 35, "path": "/cn/ex5.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 5: ๆ›ดๅคš็š„ๅ˜้‡ๅ’Œๆ‰“ๅฐ\n************************\n\nๆˆ‘ไปฌ็Žฐๅœจ่ฆ้”ฎๅ…ฅๆ›ดๅคš็š„ๅ˜้‡ๅนถไธ”ๆŠŠๅฎƒไปฌๆ‰“ๅฐๅ‡บๆฅใ€‚่ฟ™ๆฌกๆˆ‘ไปฌๅฐ†ไฝฟ็”จไธ€ไธชๅซโ€œๆ ผๅผๅŒ–\\\nๅญ—็ฌฆไธฒ(format string)โ€็š„ไธœ่ฅฟ. 
ๆฏไธ€ๆฌกไฝ ไฝฟ็”จ ``\"`` ๆŠŠไธ€ไบ›ๆ–‡ๆœฌๅผ•็”จ่ตทๆฅ๏ผŒไฝ ๅฐฑๅปบ็ซ‹ไบ†ไธ€ไธชๅญ—็ฌฆไธฒใ€‚ \nๅญ—็ฌฆไธฒๆ˜ฏ็จ‹ๅบๅฐ†ไฟกๆฏๅฑ•็คบ็ป™ไบบ็š„ๆ–นๅผใ€‚ไฝ ๅฏไปฅๆ‰“ๅฐๅฎƒไปฌ๏ผŒๅฏไปฅๅฐ†ๅฎƒไปฌๅ†™ๅ…ฅๆ–‡ไปถ๏ผŒ่ฟ˜\\\nๅฏไปฅๅฐ†ๅฎƒไปฌๅ‘้€็ป™็ฝ‘็ซ™ๆœๅŠกๅ™จ๏ผŒๅพˆๅคšไบ‹ๆƒ…้ƒฝๆ˜ฏ้€š่ฟ‡ๅญ—็ฌฆไธฒไบคๆตๅฎž็Žฐ็š„ใ€‚\n\nๅญ—็ฌฆไธฒๆ˜ฏ้žๅธธๅฅฝ็”จ็š„ไธœ่ฅฟ๏ผŒๆ‰€ไปฅๅ†่ฟ™ไธช็ปƒไน ไธญไฝ ๅฐ†ๅญฆไผšๅฆ‚ไฝ•ๅˆ›ๅปบๅŒ…ๅซๅ˜้‡ๅ†…ๅฎน็š„ๅญ—\\\n็ฌฆไธฒใ€‚ไฝฟ็”จไธ“้—จ็š„ๆ ผๅผๅ’Œ่ฏญๆณ•ๆŠŠๅ˜้‡็š„ๅ†…ๅฎนๆ”พๅˆฐๅญ—็ฌฆไธฒ้‡Œ๏ผŒ็›ธๅฝ“ไบŽๆฅๅ‘Š่ฏ‰ python ๏ผšโ€œๅ˜ฟ๏ผŒ\\ \n่ฟ™ๆ˜ฏไธ€ไธชๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒ๏ผŒๆŠŠ่ฟ™ไบ›ๅ˜้‡ๆ”พๅˆฐ้‚ฃๅ‡ ไธชไฝ็ฝฎใ€‚โ€\n\nไธ€ๆ ท็š„๏ผŒๅณไฝฟไฝ ่ฏปไธๆ‡‚่ฟ™ไบ›ๅ†…ๅฎน๏ผŒๅช่ฆไธ€ๅญ—ไธๅทฎๅœฐ้”ฎๅ…ฅๅฐฑๅฏไปฅไบ†ใ€‚\n\n.. literalinclude:: ex/ex5.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n================\n\n.. literalinclude:: ex/ex5.txt\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n========\n\n1. ไฟฎๆ”นๆ‰€ๆœ‰็š„ๅ˜้‡ๅๅญ—๏ผŒๆŠŠๅฎƒไปฌๅ‰้ข็š„``my_``ๅŽปๆމใ€‚็กฎ่ฎคๅฐ†ๆฏไธ€ไธชๅœฐๆ–น็š„้ƒฝๆ”นๆމ๏ผŒ\\ \n ไธๅชๆ˜ฏไฝ ไฝฟ็”จ``=``่ต‹ๅ€ผ่ฟ‡็š„ๅœฐๆ–นใ€‚\n2. ่ฏ•็€ไฝฟ็”จๆ›ดๅคš็š„ๆ ผๅผๅŒ–ๅญ—็ฌฆใ€‚ไพ‹ๅฆ‚ ``%r`` ๅฐฑๆ˜ฏๆ˜ฏ้žๅธธๆœ‰็”จ็š„ไธ€ไธช๏ผŒๅฎƒ็š„ๅซไน‰ๆ˜ฏโ€œไธ็ฎกไป€ไนˆ้ƒฝ\\\n ๆ‰“ๅฐๅ‡บๆฅโ€ใ€‚\n3. ๅœจ็ฝ‘ไธŠๆœ็ดขๆ‰€ๆœ‰็š„ Python ๆ ผๅผๅŒ–ๅญ—็ฌฆใ€‚\n4. 
่ฏ•็€ไฝฟ็”จๅ˜้‡ๅฐ†่‹ฑๅฏธๅ’Œ็ฃ…่ฝฌๆขๆˆๅŽ˜็ฑณๅ’Œๅƒๅ…‹ใ€‚ไธ่ฆ็›ดๆŽฅ้”ฎๅ…ฅ็ญ”ๆกˆใ€‚ไฝฟ็”จ Python ็š„\\\n ่ฎก็ฎ—ๅŠŸ่ƒฝๆฅๅฎŒๆˆใ€‚\n\n" }, { "alpha_fraction": 0.7062900066375732, "alphanum_fraction": 0.7150852680206299, "avg_line_length": 45.88750076293945, "blob_id": "058f183b6c5d762cf9f16b2c412c59a4c2a37297", "content_id": "2463f2d6cbe4adfa266e3983eaccfe37d2649a6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3752, "license_type": "no_license", "max_line_length": 83, "num_lines": 80, "path": "/ex15.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 15: Reading Files\n**************************\n\nEverything you've learned about ``raw_input`` and ``argv`` is so you can start\nreading files. You may have to play with this exercise the most to understand\nwhat's going on, so do it carefully and remember your checks. Working with\nfiles is an easy way to *erase your work* if you are not careful.\n\nThis exercise involves writing two files. One is your usual ``ex15.py`` file\nthat you will run, but the *other* is named ``ex15_sample.txt``. This second\nfile isn't a script but a plain text file we'll be reading in our script.\nHere are the contents of that file:\n\n.. literalinclude:: ex/ex15_sample.txt\n\nWhat we want to do is \"open\" that file in our script and print it out. However,\nwe do not want to just \"hard code\" the name ``ex15_sample.txt`` into our script.\n\"Hard coding\" means putting some bit of information that should\ncome from the user as a string right in our program. That's bad because we want\nit to load other files later. The solution is to use ``argv`` and ``raw_input``\nto ask the user what file they want instead of \"hard coding\" the file's name.\n\n.. 
literalinclude:: ex/ex15.py\n :linenos:\n\nA few fancy things are going on in this file, so let's break it down real\nquick:\n\nLine 1-3 should be a familiar use of ``argv`` to get a filename. Next we have line\n5 where we use a new command ``open``. Right now, run ``pydoc open`` and read\nthe instructions. Notice how like your own scripts and ``raw_input``, it takes a\nparameter and returns a value you can set to your own variable. You just opened\na file.\n\nLine 7 we print a little line, but on line 8 we have something very new and\nexciting. We call a function on ``txt``. What you got back from open is a\n``file``, and it's also got commands you can give it. You give a file a command\nby using the ``.`` (dot or period), the name of the command, and parameters.\nJust like with ``open`` and ``raw_input``. The difference is that when you say\n``txt.read()`` you are saying, \"Hey txt! Do your read command with no\nparameters!\"\n\nThe remainder of the file is more of the same, but we'll leave the analysis\nto you in the extra credit.\n\n\nWhat You Should See\n===================\n\nI made a file called \"ex15_sample.txt\" and ran my script.\n\n.. literalinclude:: ex/ex15.txt\n :language: console\n\nExtra Credit\n============\n\nThis is a big jump so be sure you do this extra credit as best you can before\nmoving on.\n\n1. Above each line write out in English what that line does. \n2. If you are not sure ask someone for help or search online.\n Many times searching for \"python THING\" will find answers\n for what that THING does in Python. Try searching for \"python open\".\n3. I used the name \"commands\" here, but they are also called \"functions\"\n and \"methods\". Search around online to see what other people do\n to define these. Do not worry if they confuse you. It's normal for\n a programmer to confuse you with their vast extensive knowledge.\n4. Get rid of the part from line 10-15 where you use ``raw_input`` and\n try the script then.\n5. 
Use only ``raw_input`` and try the script that way. Think of why one\n way of getting the filename would be better than another.\n6. Run ``pydoc file`` and scroll down until you see the ``read()`` command\n (method/function). See all the other ones you can use? Skip the\n ones that have ``__`` (two underscores) in front because those are junk.\n Try some of the other commands.\n7. Startup ``python`` again and use ``open`` from the prompt. Notice\n how you can open files and run ``read`` on them right there?\n8. Have your script also do a ``close()`` on the ``txt`` and ``txt_again``\n variables. It's important to close files when you are done with them.\n\n" }, { "alpha_fraction": 0.7599244117736816, "alphanum_fraction": 0.764650285243988, "avg_line_length": 50.585365295410156, "blob_id": "0af86c3773fa2fdba5157aae577dce4c9a0fc2e5", "content_id": "42b0c9e3f792b9238787394f86c27b5b1aeae6b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2116, "license_type": "no_license", "max_line_length": 90, "num_lines": 41, "path": "/ex26.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 26: Congratulations, Take A Test!\n******************************************\n\nYou are almost done with the first half of the book. The second half is where\nthings get interesting. You will learn logic and be able to do useful\nthings like make decisions.\n\nBefore you continue, I have a quiz for you. This quiz will be *very*\nhard because it requires you to fix someone else's code. When you are a programmer\nyou often have to deal with another programmer's code, and also with their arrogance.\nThey will very frequently claim that their code is perfect. \n\nThese programmers are stupid people who care little for others. A good\nprogrammer assumes, like a good scientist, that there's always *some*\nprobability their code is wrong. 
Good programmers start from the premise that\ntheir software is broken and then work to rule out all possible ways it could\nbe wrong before finally admitting that maybe it really is the other guy's code.\n\nIn this exercise, you will practice dealing with a bad programmer by fixing\na bad programmer's code. I have poorly copied exercises 24 and 25 into a\nfile and removed random characters and added flaws. Most of the errors\nare things Python will tell you, while some of them are math errors you should find.\nOthers are formatting errors or spelling mistakes in the strings.\n\nAll of these errors are very common mistakes all programmers make. Even experienced ones.\n\nYour job in this exercise is to correct this file. Use all of your skills to\nmake this file better. Analyze it first, maybe printing it out to edit it like\nyou would a school term paper. Fix each flaw and keep running it and fixing it\nuntil the script runs perfectly. Try not to get help, and instead if you get\nstuck take a break and come back to it later.\n\nEven if this takes days to do, bust through it and make it right.\n\nFinally, the point of this exercise isn't to type it in, but to fix\nan existing file. To do that, you must go to:\n\n* http://learnpythonthehardway.org/exercise26.txt\n\nCopy-paste the code into a file named ``ex26.py``. 
This is the only\ntime you are allowed to copy-paste.\n\n" }, { "alpha_fraction": 0.6932132840156555, "alphanum_fraction": 0.7056786417961121, "avg_line_length": 19.05555534362793, "blob_id": "eb276159e38b271fe102dcbc2b9fbb9862cabcb4", "content_id": "ba639d94655576139ebb3544cf152ed3310c0e44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3122, "license_type": "no_license", "max_line_length": 77, "num_lines": 72, "path": "/cn/ex1.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 1: ็ฌฌไธ€ไธช็จ‹ๅบ\n********************************\n\nไฝ ๅบ”่ฏฅๅœจ็ปƒไน  0 ไธญ่Šฑไบ†ไธๅฐ‘็š„ๆ—ถ้—ด๏ผŒๅญฆไผšไบ†ๅฆ‚ไฝ•ๅฎ‰่ฃ…ๆ–‡ๆœฌ็ผ–่พ‘ๅ™จใ€่ฟ่กŒๆ–‡ๆœฌ็ผ–่พ‘ๅ™จใ€ไปฅๅŠๅฆ‚ไฝ•่ฟ่กŒๅ‘ฝไปค่กŒ็ปˆ็ซฏ๏ผŒ\\ \n่€Œไธ”ไฝ ๅทฒ็ป่Šฑๆ—ถ้—ด็†Ÿๆ‚‰ไบ†่ฟ™ไบ›ๅทฅๅ…ทใ€‚่ฏทไธ่ฆ่ทณ่ฟ‡ๅ‰ไธ€ไธช็ปƒไน ็š„ๅ†…ๅฎน็›ดๆŽฅ่ฟ›่กŒไธ‹้ข็š„ๅ†…ๅฎน๏ผŒ่ฟ™ไนŸๆ˜ฏๆœฌไนฆๅ”ฏไธ€็š„ไธ€ๆฌก่ฟ™ๆ ท็š„่ญฆ็คบใ€‚\n\n.. literalinclude:: ex/ex1.py\n :linenos:\n\nๅฐ†ไธŠ้ข็š„ๅ†…ๅฎนๅ†™ๅˆฐไธ€ไธชๆ–‡ไปถไธญ๏ผŒๅ–ๅไธบ ``ex1.py``\\ใ€‚ๆณจๆ„่ฟ™ไธชๅ‘ฝๅๆ–นๅผ๏ผŒPythonๆ–‡ไปถๆœ€ๅฅฝไปฅ ``.py`` ็ป“ๅฐพใ€‚\n\n.. warning::\n\n ไธ่ฆๆŠŠไธŠ้ขๅ†…ๅฎนๆœ€ๅทฆ่พน็š„ๆ•ฐๅญ—ไนŸ่พ“่ฟ›ๅŽปใ€‚่ฟ™ไบ›ๆ˜ฏๆ‰€่ฐ“็š„โ€œ่กŒๅท(line numbers)โ€๏ผŒ็จ‹ๅบๅ‘˜ๅœจ่ฐˆ่ฎบๅˆฐ็จ‹ๅบไธญๆŸไธชไฝ็ฝฎ\\\n ็š„้”™่ฏฏๆ—ถไผšไฝฟ็”จๅˆฐ่กŒๅทใ€‚Python ๅœจ็จ‹ๅบๅ‡บ้”™ๆ—ถไนŸไผšไปฅ่กŒๅท็š„ๆ–นๅผๅ‘Š่ฏ‰ไฝ ้”™่ฏฏไฟกๆฏ๏ผŒไฝ†ๆ˜ฏไฝ ๆ˜ฏไธ้œ€่ฆ่พ“ๅ…ฅ่ฟ™ไบ›่กŒๅท็š„ใ€‚\n\n\n็„ถๅŽไฝ ้œ€่ฆๅœจๅ‘ฝไปค่กŒ็ปˆ็ซฏ้€š่ฟ‡่พ“ๅ…ฅไปฅไธ‹ๅ†…ๅฎนๆฅ่ฟ่กŒ่ฟ™ๆฎตไปฃ็ ๏ผš\n\n.. code-block:: bash\n\n python ex1.py\n\n\nๅฆ‚ๆžœไฝ ๅ†™ๅฏนไบ†็š„่ฏ๏ผŒไฝ ๅบ”่ฏฅ็œ‹ๅˆฐๅ’Œไธ‹้ขไธ€ๆ ท็š„ๅ†…ๅฎนใ€‚ๅฆ‚ๆžœไธไธ€ๆ ท๏ผŒ้‚ฃๅฐฑๆ˜ฏไฝ ๅผ„้”™ไบ†ไป€ไนˆไธœ่ฅฟใ€‚\\\nไธๆ˜ฏ่ฎก็ฎ—ๆœบๅ‡บ้”™ไบ†๏ผŒ่ฎก็ฎ—ๆœบๆฒก้”™ใ€‚\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„ๅ†…ๅฎน\n===================\n\n.. 
literalinclude:: ex/ex1.txt\n\nไฝ ไนŸ่ฎธไผš็œ‹ๅˆฐ ``$`` ๅ‰้ขไผšๆ˜พ็คบไฝ ๆ‰€ๅœจ็š„็›ฎๅฝ•็š„ๅๅญ—๏ผŒ่ฟ™ไธๆ˜ฏ้—ฎ้ข˜๏ผŒไฝ†ๅฆ‚ๆžœไฝ ็š„่พ“ๅ‡บไธไธ€ๆ ท\\\n็š„่ฏ๏ผŒไฝ ้œ€่ฆๆ‰พๅ‡บไธบไป€ไนˆไผšไธไธ€ๆ ท๏ผŒ็„ถๅŽๆŠŠไฝ ็š„็จ‹ๅบๆ”นๅฏนใ€‚\n\nๅฆ‚ๆžœไฝ ็œ‹ๅˆฐ็ฑปไผผๅฆ‚ไธ‹็š„้”™่ฏฏไฟกๆฏ๏ผš\n\n.. literalinclude:: ex/ex1.err\n :language: python\n :linenos:\n\n่ฟ™ไบ›ๅ†…ๅฎนไฝ ๅบ”่ฏฅๅญฆไผš็œ‹ๆ‡‚็š„๏ผŒ่ฟ™ๆ˜ฏๅพˆ้‡่ฆ็š„ไธ€็‚น๏ผŒๅ› ไธบไฝ ไปฅๅŽ่ฟ˜ไผš็Šฏ็ฑปไผผ็š„้”™่ฏฏใ€‚ๅฐฑๆ˜ฏๆˆ‘็ŽฐๅœจไนŸไผš\\\n็Šฏ่ฟ™ๆ ท็š„้”™่ฏฏใ€‚่ฎฉๆˆ‘ไปฌไธ€่กŒไธ€่กŒๆฅ็œ‹ใ€‚\n\n1. ้ฆ–ๅ…ˆๆˆ‘ไปฌๅœจๅ‘ฝไปค่กŒ็ปˆ็ซฏ่พ“ๅ…ฅๅ‘ฝไปคๆฅ่ฟ่กŒ ``ex1.py`` ่„šๆœฌใ€‚\n2. Python ๅ‘Š่ฏ‰ๆˆ‘ไปฌ ``ex1.py`` ๆ–‡ไปถ็š„็ฌฌ 3 ่กŒๆœ‰ไธ€ไธช้”™่ฏฏใ€‚\n3. ็„ถๅŽ่ฟ™ไธ€่กŒ็š„ๅ†…ๅฎน่ขซๆ‰“ๅฐไบ†ๅ‡บๆฅใ€‚\n4. ็„ถๅŽ Python ๆ‰“ๅฐๅ‡บไธ€ไธช ``^`` (ไบ•ๅท๏ผŒcaret) ็ฌฆๅท๏ผŒ็”จๆฅๆŒ‡็คบๅ‡บ้”™็š„ไฝ็ฝฎใ€‚\n ๆณจๆ„ๅˆฐๅฐ‘ไบ†ไธ€ไธช ``\"`` (ๅŒๅผ•ๅท๏ผŒdouble-quote) ็ฌฆๅทไบ†ๅ—๏ผŸ\n5. ๆœ€ๅŽ๏ผŒๅฎƒๆ‰“ๅฐๅ‡บไบ†ไธ€ไธชโ€œ่ฏญๆณ•้”™่ฏฏ(SyntaxError)โ€ๅ‘Š่ฏ‰ไฝ ็ฉถ็ซŸๆ˜ฏไป€ไนˆๆ ท็š„้”™่ฏฏใ€‚้€šๅธธ่ฟ™ไบ›้”™่ฏฏไฟกๆฏ\\\n ้ƒฝ้žๅธธ้šพๆ‡‚๏ผŒไธ่ฟ‡ไฝ ๅฏไปฅๆŠŠ้”™่ฏฏไฟกๆฏ็š„ๅ†…ๅฎนๅคๅˆถๅˆฐๆœ็ดขๅผ•ๆ“Ž้‡Œ๏ผŒ็„ถๅŽไฝ ๅฐฑ่ƒฝ็œ‹ๅˆฐๅˆซไบบไนŸ้‡ๅˆฐ่ฟ‡่ฟ™ๆ ท็š„้”™่ฏฏ๏ผŒ\n ่€Œไธ”ไฝ ไนŸ่ฎธ่ƒฝๆ‰พๅˆฐๅฆ‚ไฝ•่งฃๅ†ณ่ฟ™ไธช้—ฎ้ข˜ใ€‚\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\nไฝ ่ฟ˜ไผšๆœ‰ ``ๅŠ ๅˆ†ไน ้ข˜`` ้œ€่ฆๅฎŒๆˆใ€‚ๅŠ ๅˆ†ไน ้ข˜้‡Œ่พน็š„ๅ†…ๅฎนๆ˜ฏไพ›ไฝ ๅฐ่ฏ•็š„ใ€‚ๅฆ‚ๆžœไฝ ่ง‰ๅพ—ๅšไธๅ‡บๆฅ๏ผŒไฝ ๅฏไปฅๆš‚ๆ—ถ\\\n่ทณ่ฟ‡๏ผŒ่ฟ‡ๆฎตๆ—ถ้—ดๅ†ๅ›žๆฅๅšใ€‚\n\nๅœจ่ฟ™ไธช็ปƒไน ไธญ๏ผŒ่ฏ•่ฏ•่ฟ™ไบ›ไธœ่ฅฟ๏ผš\n\n1. ่ฎฉไฝ ็š„่„šๆœฌๅ†ๅคšๆ‰“ๅฐไธ€่กŒใ€‚\n2. ่ฎฉไฝ ็š„่„šๆœฌๅชๆ‰“ๅฐไธ€่กŒใ€‚\n3. ๅœจไธ€่กŒ็š„่ตทๅง‹ไฝ็ฝฎๆ”พไธ€ไธช '#' (octothorpe) ็ฌฆๅทใ€‚ๅฎƒ็š„ไฝœ็”จๆ˜ฏไป€ไนˆ๏ผŸ่‡ชๅทฑ็ ”็ฉถไธ€ไธ‹ใ€‚\n\nไปŽ็Žฐๅœจๅผ€ๅง‹๏ผŒ้™ค้ž็‰นๅˆซๆƒ…ๅ†ต๏ผŒๆˆ‘ๅฐ†ไธๅ†่งฃ้‡Šๆฏไธชไน ้ข˜็š„ๅทฅไฝœๅŽŸ็†ไบ†ใ€‚\n\n\n.. 
note::\n\n ไบ•ๅทๆœ‰ๅพˆๅคš็š„่‹ฑๆ–‡ๅๅญ—๏ผŒไพ‹ๅฆ‚๏ผš'octothorpe(ๅ…ซ่ง’ๅธฝ)'๏ผŒ'pound(่‹ฑ้•‘็ฌฆ)', 'hash(็”ต่ฏ็š„#้”ฎ)', 'mesh(็ฝ‘)' ็ญ‰ใ€‚\n" }, { "alpha_fraction": 0.6979332566261292, "alphanum_fraction": 0.7042925357818604, "avg_line_length": 33.56044006347656, "blob_id": "dc4fba0c4b5448a4192e78550b4acb18f9f71ad2", "content_id": "f48798f24c30e39bedad1ee2759e959c96ef34c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3145, "license_type": "no_license", "max_line_length": 125, "num_lines": 91, "path": "/ex1.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 1: A Good First Program\n********************************\n\nRemember, you should have spent a good amount of time in Exercise 0 learning\nhow to install a text editor, run the text editor, run the Terminal, and\nwork with both of them. If you haven't done that then do not go on. You will\nnot have a good time. This is the only time I'll start an exercise with a \nwarning that you should not skip or get ahead of yourself.\n\n.. literalinclude:: ex/ex1.py\n :linenos:\n\nType the above into a single file named ``ex1.py``. This is important\nas python works best with files ending in ``.py``.\n\n.. warning::\n\n Do not type the numbers on the far left of these lines. Those are called\n \"line numbers\" and they are used by programmers to talk about what part of \n a program is wrong. Python will tell you errors related to these line numbers,\n but *you* do not type them in.\n\n\nThen in Terminal *run* the file by typing:\n\n.. code-block:: console\n\n python ex1.py\n\n\nIf you did it right then you should see the same output I have below. If not,\nyou have done something wrong. No, the computer is not wrong.\n\nWhat You Should See\n===================\n\n.. 
literalinclude:: ex/ex1.txt\n :language: console\n\nYou may see the name of your directory before the ``$`` which is fine,\nbut if your output is not exactly the same, find out why and fix it.\n\nIf you have an error it will look like this:\n\n.. literalinclude:: ex/ex1.err\n :language: console\n :linenos:\n\nIt's important that you can read these since you will be making many of \nthese mistakes. Even I make many of these mistakes. Let's look at this\nline-by-line.\n\n1. Here we ran our command in the terminal to run the ``ex1.py`` script.\n2. Python then tells us that the file ``ex1.py`` has an error on line 3.\n3. It then prints this line for us.\n4. Then it puts a ``^`` (caret) character to point at where the problem is.\n Notice the missing ``\"`` (double-quote) character?\n5. Finally, it prints out a \"SyntaxError\" and tells us something about what might\n be the error. Usually these are very cryptic, but if you copy that text into\n a search engine, you will find someone else who's had that error and you can probably\n figure out how to fix it.\n\n.. warning::\n\n If you are from another country, and you get errors about ASCII encodings, then\n put this at the top of your python scripts:\n\n # -*- coding: utf-8 -*-\n\n It will fix them so that you can use unicode UTF-8 in your scripts without a problem.\n\nExtra Credit\n============\n\nYou will also have ``Extra Credit``. The Extra Credit contains things you\nshould *try* to do. If you can't, skip it and come back later.\n\nFor this exercise, try these things:\n\n1. Make your script print another line.\n2. Make your script print only one of the lines.\n3. Put a '#' (octothorpe) character at the beginning of a line. What did it do?\n Try to find out what this character does.\n\nFrom now on, I won't explain how each exercise works unless an exercise is\ndifferent.\n\n\n.. note::\n\n An 'octothorpe' is also called a 'pound', 'hash', 'mesh', or any number of names. 
Pick the one that makes you chill out.\n" }, { "alpha_fraction": 0.7448042631149292, "alphanum_fraction": 0.7564040422439575, "avg_line_length": 22.78160858154297, "blob_id": "ef3d4688c5b24d20af6e9bf2a6346f9debbc1660", "content_id": "7cde2521ee9f9f269fac7a6a6ff0bef35205ede1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4889, "license_type": "no_license", "max_line_length": 67, "num_lines": 87, "path": "/cn/ex47.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "็ปƒไน  47: ่‡ชๅŠจๅŒ–ๆต‹่ฏ•\n******************************\n\nไธบไบ†็กฎ่ฎคๆธธๆˆ็š„ๅŠŸ่ƒฝๆ˜ฏๅฆๆญฃๅธธ๏ผŒไฝ ้œ€่ฆไธ€้ไธ€้ๅœฐๅœจไฝ ็š„ๆธธๆˆไธญ่พ“ๅ…ฅๅ‘ฝไปคใ€‚่ฟ™ไธช่ฟ‡็จ‹ๆ˜ฏๅพˆ\\\nๆžฏ็‡ฅๆ— ๅ‘ณ็š„ใ€‚ๅฆ‚ๆžœ่ƒฝๅ†™ไธ€ๅฐๆฎตไปฃ็ ็”จๆฅๆต‹่ฏ•ไฝ ็š„ไปฃ็ ๅฒ‚ไธๆ˜ฏๆ›ดๅฅฝ๏ผŸ็„ถๅŽๅช่ฆไฝ ๅฏน็จ‹ๅบๅšไบ†\\\nไปปไฝ•ไฟฎๆ”น๏ผŒๆˆ–่€…ๆทปๅŠ ไบ†ไป€ไนˆๆ–ฐไธœ่ฅฟ๏ผŒไฝ ๅช่ฆโ€œ่ท‘ไธ€ไธ‹ไฝ ็š„ๆต‹่ฏ•โ€๏ผŒ่€Œ่ฟ™ไบ›ๆต‹่ฏ•่ƒฝ็กฎ่ฎค็จ‹ๅบไพ\\\n็„ถ่ƒฝๆญฃ็กฎ่ฟ่กŒใ€‚่ฟ™ไบ›่‡ชๅŠจๆต‹่ฏ•ไธไผšๆŠ“ๅˆฐๆ‰€ๆœ‰็š„ bug๏ผŒไฝ†ๅฏไปฅ่ฎฉไฝ ๆ— ้œ€้‡ๅค่พ“ๅ…ฅๅ‘ฝไปค่ฟ่กŒไฝ ็š„\\\nไปฃ็ ๏ผŒไปŽ่€Œไธบไฝ ่Š‚็บฆๅพˆๅคšๆ—ถ้—ดใ€‚\n\nไปŽ่ฟ™ไธ€็ซ ๅผ€ๅง‹๏ผŒไปฅๅŽ็š„็ปƒไน ๅฐ†ไธไผšๆœ‰โ€œไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœโ€่ฟ™ไธ€่Š‚๏ผŒๅ–่€Œไปฃไน‹็š„ๆ˜ฏไธ€ไธชโ€œไฝ ๅบ”\\\n่ฏฅๆต‹่ฏ•็š„ไธœ่ฅฟโ€ไธ€่Š‚ใ€‚ไปŽ็Žฐๅœจๅผ€ๅง‹๏ผŒไฝ ้œ€่ฆไธบ่‡ชๅทฑๅ†™็š„ๆ‰€ๆœ‰ไปฃ็ ๅ†™่‡ชๅŠจๅŒ–ๆต‹่ฏ•๏ผŒ่€Œ่ฟ™ๅฐ†่ฎฉไฝ \\\nๆˆไธบไธ€ไธชๆ›ดๅฅฝ็š„็จ‹ๅบๅ‘˜ใ€‚\n\nๆˆ‘ไธไผš่ฏ•ๅ›พ่งฃ้‡Šไธบไป€ไนˆไฝ ้œ€่ฆๅ†™่‡ชๅŠจๅŒ–ๆต‹่ฏ•ใ€‚ๆˆ‘่ฆๅ‘Š่ฏ‰ไฝ ็š„ๆ˜ฏ๏ผŒไฝ ๆƒณ่ฆๆˆไธบไธ€ไธช็จ‹ๅบๅ‘˜๏ผŒ่€Œ\\\n็จ‹ๅบ็š„ไฝœ็”จๆ˜ฏ่ฎฉๆ— ่Šๅ†—็น็š„ๅทฅไฝœ่‡ชๅŠจๅŒ–๏ผŒๆต‹่ฏ•่ฝฏไปถๆฏซๆ— ็–‘้—ฎๆ˜ฏๆ— ่Šๅ†—็น็š„๏ผŒๆ‰€ไปฅไฝ ่ฟ˜ๆ˜ฏๅ†™็‚น\\\nไปฃ็ ่ฎฉๅฎƒไธบไฝ ๆต‹่ฏ•็š„ๆ›ดๅฅฝใ€‚\n\n่ฟ™ๅบ”่ฏฅๆ˜ฏไฝ ้œ€่ฆ็š„ๆ‰€ๆœ‰็š„่งฃ้‡Šไบ†ใ€‚ๅ› ไธบไฝ ๅ†™ๅ•ๅ…ƒๆต‹่ฏ•็š„ๅŽŸๅ› ๆ˜ฏ่ฎฉไฝ ็š„ๅคง่„‘ๆ›ดๅŠ ๅผบๅฅใ€‚ไฝ ่ฏป\\\nไบ†่ฟ™ๆœฌไนฆ๏ผŒๅ†™ไบ†ๅพˆๅคšไปฃ็ ่ฎฉๅฎƒไปฌๅฎž็Žฐไธ€ไบ›ไบ‹ๆƒ…ใ€‚็Žฐๅœจไฝ ๅฐ†ๆ›ด่ฟ›ไธ€ๆญฅ๏ผŒๅ†™ๅ‡บๆ‡‚ๅพ—ไฝ ๅ†™็š„ๅ…ถไป–\\\nไปฃ็ ็š„ไปฃ็ ใ€‚่ฟ™ไธชๅ†™ไปฃ็ 
ๆต‹่ฏ•ไฝ ๅ†™็š„ๅ…ถไป–ไปฃ็ ็š„่ฟ‡็จ‹ๅฐ†ๅผบ่ฟซไฝ ๆธ…ๆฅš็š„็†่งฃไฝ ไน‹ๅ‰ๅ†™็š„ไปฃ็ ใ€‚\\\n่ฟ™ไผš่ฎฉไฝ ๆ›ดๆธ…ๆ™ฐๅœฐไบ†่งฃไฝ ๅ†™็š„ไปฃ็ ๅฎž็Žฐ็š„ๅŠŸ่ƒฝๅŠๅ…ถๅŽŸ็†๏ผŒ่€Œไธ”่ฎฉไฝ ๅฏน็ป†่Š‚็š„ๆณจๆ„ๆ›ดไธŠไธ€ไธชๅฐ้˜ถใ€‚\n\n\nๆ’ฐๅ†™ๆต‹่ฏ•็”จไพ‹\n===================\n\nๆˆ‘ไปฌๅฐ†ๆ‹ฟไธ€ๆฎต้žๅธธ็ฎ€ๅ•็š„ไปฃ็ ไธบไพ‹๏ผŒๅ†™ไธ€ไธช็ฎ€ๅ•็š„ๆต‹่ฏ•๏ผŒ่ฟ™ไธชๆต‹่ฏ•ๅฐ†ๅปบ็ซ‹ๅœจไธŠ่Š‚ๆˆ‘ไปฌๅˆ›ๅปบ\\\n็š„้กน็›ฎ้ชจๆžถไธŠ้ขใ€‚\n\n้ฆ–ๅ…ˆไปŽไฝ ็š„้กน็›ฎ้ชจๆžถๅˆ›ๅปบไธ€ไธชๅซๅš ``ex47`` ็š„้กน็›ฎใ€‚็กฎ่ฎค่ฏฅๆ”นๅ็งฐ็š„ๅœฐๆ–น้ƒฝๆœ‰ๆ”น่ฟ‡๏ผŒๅฐคๅ…ถ\\\nๆ˜ฏ ``tests/ex47_tests.py`` ่ฟ™ๅค„ไธ่ฆๅ†™้”™๏ผŒๅฆๅค–่ฟ่กŒ ``nosetest`` ็กฎ่ฎคไธ€ไธ‹ๆฒกๆœ‰้”™่ฏฏไฟกๆฏใ€‚\\\nๆฃ€ๆŸฅไธ€ไธ‹ ``tests/skel_tests.pyc`` ่ฟ™ไธชๆ–‡ไปถ๏ผŒๆœ‰็š„่ฏๅฐฑๆŠŠๅฎƒๅˆ ๆމ๏ผŒ่ฟ™ไธ€็‚น้œ€่ฆๅฐคๅ…ถๆณจๆ„ใ€‚\n\nๆŽฅไธ‹ๆฅๅˆ›ๅปบไธ€ไธช็ฎ€ๅ•็š„ ``ex47/game.py`` ๆ–‡ไปถ๏ผŒ้‡Œ่พนๆ”พไธ€ไบ›็”จๆฅ่ขซๆต‹่ฏ•็š„ไปฃ็ ใ€‚ๆˆ‘ไปฌ็Žฐๅœจๆ”พ\\\nไธ€ไธชๅ‚ปไนŽไนŽ็š„ๅฐ class ่ฟ›ๅŽป๏ผŒ็”จๆฅไฝœไธบๆˆ‘ไปฌ็š„ๆต‹่ฏ•ๅฏน่ฑก๏ผš\n\n.. literalinclude:: ex/ex47.py\n :linenos:\n\nๅ‡†ๅค‡ๅฅฝไบ†่ฟ™ไธชๆ–‡ไปถ๏ผŒๆŽฅไธ‹ๆฅๆŠŠๆต‹่ฏ•้ชจๆžถๆ”นๆˆ่ฟ™ๆ ทๅญ๏ผš\n\n.. 
literalinclude:: ex/ex47_tests.py\n :linenos:\n\n่ฟ™ไธชๆ–‡ไปถ import ไบ†ไฝ ๅœจ ``ex47.game`` ๅˆ›ๅปบ็š„ ``Room`` ่ฟ™ไธช็ฑป๏ผŒๆŽฅไธ‹ๆฅๆˆ‘ไปฌ่ฆๅš็š„\\\nๅฐฑๆ˜ฏๆต‹่ฏ•ๅฎƒใ€‚ไบŽๆ˜ฏๆˆ‘ไปฌ็œ‹ๅˆฐไธ€็ณปๅˆ—็š„ไปฅ ``test_`` ๅผ€ๅคด็š„ๆต‹่ฏ•ๅ‡ฝๆ•ฐ๏ผŒๅฎƒไปฌๅฐฑๆ˜ฏๆ‰€่ฐ“็š„\\\nโ€œๆต‹่ฏ•็”จไพ‹(test case)โ€๏ผŒๆฏไธ€ไธชๆต‹่ฏ•็”จไพ‹้‡Œ้ข้ƒฝๆœ‰ไธ€ๅฐๆฎตไปฃ็ ๏ผŒๅฎƒไปฌไผšๅˆ›ๅปบไธ€ไธชๆˆ–่€…\\\nไธ€ไบ›ๆˆฟ้—ด๏ผŒ็„ถๅŽๅŽป็กฎ่ฎคๆˆฟ้—ด็š„ๅŠŸ่ƒฝๅ’Œไฝ ๆœŸๆœ›็š„ๆ˜ฏๅฆไธ€ๆ ทใ€‚ๅฎƒๆต‹่ฏ•ไบ†ๅŸบๆœฌ็š„ๆˆฟ้—ดๅŠŸ่ƒฝ๏ผŒ็„ถๅŽ\\\nๆต‹่ฏ•ไบ†่ทฏๅพ„๏ผŒๆœ€ๅŽๆต‹่ฏ•ไบ†ๆ•ดไธชๅœฐๅ›พใ€‚\n\n่ฟ™้‡Œๆœ€้‡่ฆ็š„ๅ‡ฝๆ•ฐๆ—ถ ``assert_equal``\\๏ผŒๅฎƒไฟ่ฏไบ†ไฝ ่ฎพ็ฝฎ็š„ๅ˜้‡๏ผŒไปฅๅŠไฝ ๅœจ ``Room``\n้‡Œ่ฎพ็ฝฎ็š„่ทฏๅพ„ๅ’Œไฝ ็š„ๆœŸๆœ›็›ธ็ฌฆใ€‚ๅฆ‚ๆžœไฝ ๅพ—ๅˆฐ้”™่ฏฏ็š„็ป“ๆžœ็š„่ฏ๏ผŒ ``nosetests`` ๅฐ†ไผšๆ‰“ๅฐๅ‡บ\\\nไธ€ไธช้”™่ฏฏไฟกๆฏ๏ผŒ่ฟ™ๆ ทไฝ ๅฐฑๅฏไปฅๆ‰พๅˆฐๅ‡บ้”™็š„ๅœฐๆ–นๅนถไธ”ไฟฎๆญฃ่ฟ‡ๆฅใ€‚\n\nๆต‹่ฏ•ๆŒ‡ๅ—\n==================\n\nๅœจๅ†™ๆต‹่ฏ•ไปฃ็ ๆ—ถ๏ผŒไฝ ๅฏไปฅ็…ง็€ไธ‹้ข่ฟ™ไบ›ไธๆ˜ฏๅพˆไธฅๆ ผ็š„ๆŒ‡ๅ—ๆฅๅš๏ผš\n\n1. ๆต‹่ฏ•่„šๆœฌ่ฆๆ”พๅˆฐ ``tests/`` ็›ฎๅฝ•ไธ‹๏ผŒๅนถไธ”ๅ‘ฝๅไธบ ``BLAH_tests.py`` ๏ผŒๅฆๅˆ™ ``nosetests``\n ๅฐฑไธไผšๆ‰ง่กŒไฝ ็š„ๆต‹่ฏ•่„šๆœฌไบ†ใ€‚่ฟ™ๆ ทๅš่ฟ˜ๆœ‰ไธ€ไธชๅฅฝๅค„ๅฐฑๆ˜ฏ้˜ฒๆญขๆต‹่ฏ•ไปฃ็ ๅ’Œๅˆซ็š„ไปฃ็ ไบ’็›ธๆททๆމใ€‚\n2. ไธบไฝ ็š„ๆฏไธ€ไธชๆจก็ป„ๅ†™ไธ€ไธชๆต‹่ฏ•ใ€‚\n3. ๆต‹่ฏ•็”จไพ‹๏ผˆๅ‡ฝๆ•ฐ๏ผ‰ไฟๆŒ็ฎ€็Ÿญ๏ผŒไฝ†ๅฆ‚ๆžœ็œ‹ไธŠๅŽปไธๆ€Žไนˆๆ•ดๆดไนŸๆฒกๅ…ณ็ณป๏ผŒๆต‹่ฏ•็”จไพ‹ไธ€่ˆฌ้ƒฝๆœ‰็‚นไนฑใ€‚\n4. ๅฐฑ็ฎ—ๆต‹่ฏ•็”จไพ‹ๆœ‰ไบ›ไนฑ๏ผŒไนŸ่ฆ่ฏ•็€่ฎฉไป–ไปฌไฟๆŒๆ•ดๆด๏ผŒๆŠŠ้‡Œ่พน้‡ๅค็š„ไปฃ็ ๅˆ ๆމใ€‚ๅˆ›ๅปบไธ€ไบ›่พ…ๅŠฉๅ‡ฝๆ•ฐ\\\n ๆฅ้ฟๅ…้‡ๅค็š„ไปฃ็ ใ€‚ๅฝ“ไฝ ไธ‹ๆฌกๅœจๆ”นๅฎŒไปฃ็ ้œ€่ฆๆ”นๆต‹่ฏ•็š„ๆ—ถๅ€™๏ผŒไฝ ไผšๆ„Ÿ่ฐขๆˆ‘่ฟ™ไธ€ๆกๅปบ่ฎฎ็š„ใ€‚้‡ๅค\\\n ็š„ไปฃ็ ไผš่ฎฉไฟฎๆ”นๆต‹่ฏ•ๅ˜ๅพ—ๅพˆ้šพๆ“ไฝœใ€‚\n5. ๆœ€ๅŽไธ€ๆกๆ˜ฏๅˆซๅคชๆŠŠๆต‹่ฏ•ๅฝ“ๅšไธ€ๅ›žไบ‹ใ€‚ๆœ‰ๆ—ถๅ€™๏ผŒๆ›ดๅฅฝ็š„ๆ–นๆณ•ๆ˜ฏๆŠŠไปฃ็ ๅ’Œๆต‹่ฏ•ๅ…จ้ƒจๅˆ ๆމ๏ผŒ็„ถๅŽ้‡ๆ–ฐ่ฎพ่ฎก\\\n ไปฃ็ ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. 
literalinclude:: ex/ex47.txt\n\nๅฆ‚ๆžœไธ€ๅˆ‡ๅทฅไฝœๆญฃๅธธ็š„่ฏ๏ผŒไฝ ็œ‹ๅˆฐ็š„็ป“ๆžœๅบ”่ฏฅๅฐฑๆ˜ฏ่ฟ™ๆ ทใ€‚่ฏ•็€ๆŠŠไปฃ็ ๆ”น้”™ๅ‡ ไธชๅœฐๆ–น๏ผŒ็„ถๅŽ็œ‹้”™่ฏฏไฟกๆฏ\\\nไผšๆ˜ฏไป€ไนˆ๏ผŒๅ†ๆŠŠไปฃ็ ๆ”นๆญฃ็กฎใ€‚\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n\n1. ไป”็ป†่ฏป่ฏป nosetest ็›ธๅ…ณ็š„ๆ–‡ๆกฃ๏ผŒๅ†ๅŽปไบ†่งฃไธ€ไธ‹ๅ…ถไป–็š„ๆ›ฟไปฃๆ–นๆกˆใ€‚\n2. ไบ†่งฃไธ€ไธ‹ Python ็š„ \"doc tests\" ๏ผŒ็œ‹็œ‹ไฝ ๆ˜ฏไธๆ˜ฏๆ›ดๅ–œๆฌข่ฟ™็งๆต‹่ฏ•ๆ–นๅผใ€‚\n3. ๆ”น่ฟ›ไฝ ๆธธๆˆ้‡Œ็š„ Room๏ผŒ็„ถๅŽ็”จๅฎƒ้‡ๅปบไฝ ็š„ๆธธๆˆ๏ผŒ่ฟ™ๆฌก้‡ๅ†™๏ผŒไฝ ้œ€่ฆไธ€่พนๅ†™ไปฃ็ ๏ผŒ\\\n ไธ€่พนๆŠŠๅ•ๅ…ƒๆต‹่ฏ•ๅ†™ๅ‡บๆฅใ€‚\n" }, { "alpha_fraction": 0.6532751321792603, "alphanum_fraction": 0.6689956188201904, "avg_line_length": 21, "blob_id": "4c79e5a1d3f32a6d0ee629ce77bc44940cc9044b", "content_id": "0b8fc36c9d51e59650e472604bdc54770a366e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2409, "license_type": "no_license", "max_line_length": 62, "num_lines": 52, "path": "/cn/ex30.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 30: Else ๅ’Œ If\n*************************\n\nๅ‰ไธ€ไน ้ข˜ไธญไฝ ๅ†™ไบ†ไธ€ไบ› โ€œif ่ฏญๅฅ(if-statements)โ€๏ผŒๅนถไธ”่ฏ•ๅ›พ็Œœๅ‡บๅฎƒไปฌๆ˜ฏไป€ไนˆ๏ผŒ\\\nไปฅๅŠๅฎž็Žฐ็š„ๆ˜ฏไป€ไนˆๅŠŸ่ƒฝใ€‚ๅœจไฝ ็ปง็ปญๅญฆไน ไน‹ๅ‰๏ผŒๆˆ‘็ป™ไฝ ่งฃ้‡Šไธ€ไธ‹ไธŠไธ€่Š‚็š„ๅŠ ๅˆ†ไน ้ข˜็š„็ญ”ๆกˆใ€‚\\\nไธŠไธ€่Š‚็š„ๅŠ ๅˆ†ไน ้ข˜ไฝ ๅš่ฟ‡ไบ†ๅง๏ผŒๆœ‰ๆฒกๆœ‰๏ผŸ\n\n\n1. ไฝ ่ฎคไธบ ``if`` ๅฏนไบŽๅฎƒไธ‹ไธ€่กŒ็š„ไปฃ็ ๅšไบ†ไป€ไนˆ๏ผŸ\n If ่ฏญๅฅไธบไปฃ็ ๅˆ›ๅปบไบ†ไธ€ไธชๆ‰€่ฐ“็š„โ€œๅˆ†ๆ”ฏโ€๏ผŒๅฐฑ่ทŸ RPG ๆธธๆˆไธญ็š„ๆƒ…่Š‚ๅˆ†ๆ”ฏไธ€ๆ ทใ€‚\\\n if ่ฏญๅฅๅ‘Š่ฏ‰ไฝ ็š„่„šๆœฌ๏ผšโ€œๅฆ‚ๆžœ่ฟ™ไธชๅธƒๅฐ”่กจ่พพๅผไธบ็œŸ๏ผŒๅฐฑ่ฟ่กŒๆŽฅไธ‹ๆฅ็š„ไปฃ็ ๏ผŒๅฆๅˆ™ๅฐฑ่ทณ่ฟ‡่ฟ™ไธ€ๆฎตใ€‚โ€\n\n2. ไธบไป€ไนˆ ``if`` ่ฏญๅฅ็š„ไธ‹ไธ€่กŒ้œ€่ฆ 4 ไธช็ฉบๆ ผ็š„็ผฉ่ฟ›๏ผŸ\n ่กŒๅฐพ็š„ๅ†’ๅท็š„ไฝœ็”จๆ˜ฏๅ‘Š่ฏ‰ Python ๆŽฅไธ‹ๆฅไฝ ่ฆๅˆ›ๅปบไธ€ไธชๆ–ฐ็š„ไปฃ็ ๅŒบๆฎตใ€‚่ฟ™ๆ นไฝ ๅˆ›ๅปบๅ‡ฝๆ•ฐๆ—ถ็š„ๅ†’ๅท\\\n ๆ˜ฏไธ€ไธช้“็†ใ€‚\n\n3. 
ๅฆ‚ๆžœไธ็ผฉ่ฟ›, ไผšๅ‘็”Ÿไป€ไนˆไบ‹ๆƒ…?\n ๅฆ‚ๆžœไฝ ๆฒกๆœ‰็ผฉ่ฟ›๏ผŒไฝ ๅบ”่ฏฅไผš็œ‹ๅˆฐ Python ๆŠฅ้”™ใ€‚Python ็š„่ง„ๅˆ™้‡Œ๏ผŒๅช่ฆไธ€่กŒไปฅโ€œๅ†’ๅท(colon)โ€ ``:``\n ็ป“ๅฐพ๏ผŒๅฎƒๆŽฅไธ‹ๆฅ็š„ๅ†…ๅฎนๅฐฑๅบ”่ฏฅๆœ‰็ผฉ่ฟ›ใ€‚\n\n4. ๆŠŠไน ้ข˜ 26 ไธญ็š„ๅ…ถๅฎƒๅธƒๅฐ”่กจ่พพๅผๆ”พๅˆฐ ``if่ฏญๅฅ`` ไธญไผšไธไผšไนŸๅฏไปฅ่ฟ่กŒๅ‘ข๏ผŸ่ฏ•ไธ€ไธ‹ใ€‚\n ๅฏไปฅใ€‚่€Œไธ”ไธ็ฎกๅคšๅคๆ‚้ƒฝๅฏไปฅ๏ผŒ่™ฝ็„ถๅ†™ๅคๆ‚็š„ไธœ่ฅฟ้€šๅธธๆ˜ฏไธ€็งไธๅฅฝ็š„็ผ–็จ‹้ฃŽๆ ผใ€‚\n\n5. ๅฆ‚ๆžœๆŠŠๅ˜้‡ ``people``, ``cats``, ๅ’Œ ``dogs`` ็š„ๅˆๅง‹ๅ€ผๆ”นๆމ, ไผšๅ‘็”Ÿไป€ไนˆไบ‹ๆƒ…?\n ๅ› ไธบไฝ ๆฏ”่พƒ็š„ๅฏน่ฑกๆ˜ฏๆ•ฐๅญ—๏ผŒๅฆ‚ๆžœไฝ ๆŠŠ่ฟ™ไบ›ๆ•ฐๅญ—ๆ”นๆމ็š„่ฏ๏ผŒๆŸไบ›ไฝ็ฝฎ็š„ if ่ฏญๅฅไผš่ขซ\\\n ๆผ”็ปŽไธบ ``True``\\๏ผŒ่€Œๅฎƒไธ‹้ข็š„ไปฃ็ ๅŒบๆฎตๅฐ†่ขซ่ฟ่กŒใ€‚ไฝ ๅฏไปฅ่ฏ•็€ไฟฎๆ”น่ฟ™ไบ›ๆ•ฐๅญ—๏ผŒ็„ถๅŽๅœจๅคด่„‘้‡Œ\\\n ๅ‡ๆƒณไธ€ไธ‹้‚ฃไธ€ๆฎตไปฃ็ ไผš่ขซ่ฟ่กŒใ€‚\n\n\nๆŠŠๆˆ‘็š„็ญ”ๆกˆๅ’Œไฝ ็š„็ญ”ๆกˆๆฏ”่พƒไธ€ไธ‹๏ผŒ็กฎ่ฎค่‡ชๅทฑ็œŸๆญฃๆ‡‚ๅพ—ไปฃ็ โ€œๅŒบๆฎตโ€็š„ๅซไน‰ใ€‚่ฟ™็‚นๅฏนไบŽไฝ ไธ‹ไธ€่Š‚็š„็ปƒไน \\\nๅพˆ้‡่ฆ๏ผŒๅ› ไธบไฝ ๅฐ†ไผšๅ†™ๅพˆๅคš็š„ if ่ฏญๅฅใ€‚\n\nๆŠŠ่ฟ™ไธ€ๆฎตๅ†™ไธ‹ๆฅ๏ผŒๅนถ่ฎฉๅฎƒ่ฟ่กŒ่ตทๆฅ๏ผš\n\n.. literalinclude:: ex/ex30.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex30.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ็Œœๆƒณไธ€ไธ‹ ``elif`` ๅ’Œ ``else`` ็š„ๅŠŸ่ƒฝใ€‚\n2. ๅฐ† ``cars``, ``people``, ๅ’Œ ``buses`` ็š„ๆ•ฐ้‡ๆ”นๆމ๏ผŒ็„ถๅŽ่ฟฝๆบฏๆฏไธ€ไธช if ่ฏญๅฅใ€‚\\\n ็œ‹็œ‹ๆœ€ๅŽไผšๆ‰“ๅฐๅ‡บไป€ไนˆๆฅใ€‚\n3. ่ฏ•็€ๅ†™ไธ€ไบ›ๅคๆ‚็š„ๅธƒๅฐ”่กจ่พพๅผ๏ผŒไพ‹ๅฆ‚ ``cars > people and buses < cars``\\ใ€‚\n4. 
ๅœจๆฏไธ€่กŒ็š„ไธŠ้ขๅ†™ๆณจ่งฃ๏ผŒ่ฏดๆ˜Ž่ฟ™ไธ€่กŒ็š„ๅŠŸ็”จใ€‚\n\n" }, { "alpha_fraction": 0.7882462739944458, "alphanum_fraction": 0.7933768630027771, "avg_line_length": 20.646465301513672, "blob_id": "6b229ddfe7376f4f61136ced64f59fd9b16f9e91", "content_id": "75948b862c911d40dfbe1216c2e0e2bf6663cbd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5766, "license_type": "no_license", "max_line_length": 46, "num_lines": 99, "path": "/cn/intro.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ๅ‰่จ€๏ผš็ฌจๅŠžๆณ•ๆ›ด็ฎ€ๅ•\n**********************\n\n่ฟ™ๆœฌๅฐไนฆ็š„็›ฎ็š„ๆ˜ฏ่ฎฉไฝ ่ตทๆญฅ็ผ–็จ‹ใ€‚่™ฝ็„ถไนฆๅ่ฏดๆ˜ฏโ€œ็ฌจๅŠžๆณ•โ€,ไฝ†ๅ…ถๅฎžๅนถ้žๅฆ‚ๆญค. ๆ‰€่ฐ“็š„โ€œ็ฌจๅŠžๆณ•โ€ๆ˜ฏ\\\nๆŒ‡ๆœฌไนฆๆ•™ๆŽˆ็š„ๆ–นๅผใ€‚ๅœจ่ฟ™ๆœฌไนฆ็š„ๅธฎๅŠฉไธ‹๏ผŒไฝ ๅฐ†้€š่ฟ‡้žๅธธ็ฎ€ๅ•็š„็ปƒไน ๅญฆไผšไธ€้—จ็ผ–็จ‹่ฏญ่จ€ใ€‚ๅš็ปƒไน \nๆ˜ฏๆฏไธช็จ‹ๅบๅ‘˜็š„ๅฟ…็ปไน‹่ทฏ๏ผš\n\n1. ๅšๆฏไธ€้“ไน ้ข˜\n2. ไธ€ๅญ—ไธๅทฎๅœฐๅ†™ๅ‡บๆฏไธ€ไธช็จ‹ๅบ\n3. 
่ฎฉ็จ‹ๅบ่ฟ่กŒ่ตทๆฅ\n\nๅฐฑๆ˜ฏ่ฟ™ๆ ทไบ†ใ€‚ๅˆšๅผ€ๅง‹่ฟ™ๅฏนไฝ ๆฅ่ฏดไผš้žๅธธ้šพ๏ผŒไฝ†ไฝ ้œ€่ฆๅšๆŒไธ‹ๅŽปใ€‚ๅฆ‚ๆžœไฝ ้€š่ฏปไบ†่ฟ™ๆœฌไนฆ๏ผŒๆฏ\\\nๆ™š่Šฑไธชไธ€ไธคๅฐๆ—ถๅšๅšไน ้ข˜๏ผŒไฝ ๅฏไปฅไธบ่‡ชๅทฑ่ฏปไธ‹ไธ€ๆœฌ็ผ–็จ‹ไนฆ็ฑๆ‰“ไธ‹่‰ฏๅฅฝ็š„ๅŸบ็ก€ใ€‚้€š่ฟ‡่ฟ™ๆœฌไนฆ\\\nไฝ ๅญฆๅˆฐ็š„ๅฏ่ƒฝไธๆ˜ฏ็œŸๆญฃ็š„็ผ–็จ‹๏ผŒไฝ†ไฝ ไผšๅญฆๅˆฐๆœ€ๅŸบๆœฌ็š„ๅญฆไน ๆ–นๆณ•ใ€‚\n\n่ฟ™ๆœฌไนฆ็š„็›ฎ็š„ๆ˜ฏๆ•™ไผšไฝ ็ผ–็จ‹ๆ–ฐๆ‰‹ๆ‰€้œ€็š„ไธ‰็งๆœ€้‡่ฆ็š„ๆŠ€่ƒฝ๏ผš่ฏปๅ’Œๅ†™ใ€ๆณจ้‡็ป†่Š‚ใ€ๅ‘็ŽฐไธๅŒใ€‚\n\n\n่ฏปๅ’Œๅ†™\n===================\n\nๅพˆๆ˜พ็„ถ๏ผŒๅฆ‚ๆžœไฝ ่ฟžๆ‰“ๅญ—้ƒฝๆˆ้—ฎ้ข˜็š„่ฏ๏ผŒ้‚ฃไฝ ๅญฆไน ็ผ–็จ‹ไนŸไผšๆˆ้—ฎ้ข˜ใ€‚ๅฐคๅ…ถๅฆ‚ๆžœไฝ ่ฟž็จ‹ๅบ\\\nๆบไปฃ็ ไธญ็š„้‚ฃไบ›ๅฅ‡ๆ€ชๅญ—็ฌฆ้ƒฝๆ‰“ไธๅ‡บๆฅ็š„่ฏ๏ผŒๅฐฑๆ นๆœฌๅˆซๆ็ผ–็จ‹ไบ†ใ€‚ๆฒกๆœ‰่ฟ™ๆ ทๅŸบๆœฌๆŠ€่ƒฝ็š„่ฏ๏ผŒ\\\nไฝ ๅฐ†่ฟžๆœ€ๅŸบๆœฌ็š„่ฝฏไปถๅทฅไฝœๅŽŸ็†้ƒฝ้šพไปฅๅญฆไผšใ€‚\n\nไธบไบ†่ฎฉไฝ ่ฎฐไฝๅ„็ง็ฌฆๅท็š„ๅๅญ—ๅนถๅฏนๅฎƒไปฌ็†Ÿๆ‚‰่ตทๆฅ๏ผŒไฝ ้œ€่ฆๅฐ†ไปฃ็ ๅ†™ไธ‹ๆฅๅนถไธ”่ฟ่กŒ่ตทๆฅใ€‚\\\n่ฟ™ไธช่ฟ‡็จ‹ไนŸไผš่ฎฉไฝ ๅฏน็ผ–็จ‹่ฏญ่จ€ๆ›ดๅŠ ็†Ÿๆ‚‰ใ€‚\n\nๆณจ้‡็ป†่Š‚\n===================\n\nๅŒบๅˆ†ๅฅฝ็จ‹ๅบๅ‘˜ๅ’Œๅทฎ็จ‹ๅบๅ‘˜็š„ๆœ€้‡่ฆ็š„ไธ€ไธชๆŠ€่ƒฝๅฐฑๆ˜ฏๅฏนไบŽ็ป†่Š‚็š„ๆณจ้‡็จ‹ๅบฆใ€‚ไบ‹ๅฎžไธŠ่ฟ™ๆ˜ฏไปปไฝ•\\\n่กŒไธšๅŒบๅˆ†ๅฅฝๅ็š„ๆ ‡ๅ‡†ใ€‚ๅฆ‚ๆžœ็ผบไนๅฏนไบŽๅทฅไฝœ็š„ๆฏไธ€ไธชๅพฎๅฐ็ป†่Š‚็š„ๆณจๆ„๏ผŒไฝ ็š„ๅทฅไฝœๆˆๆžœๅฐ†็ผบไน\\\n้‡่ฆ็š„ๅ…ƒ็ด ใ€‚ไปฅ็ผ–็จ‹ๆฅ่ฎฒ๏ผŒ่ฟ™ๆ ทไฝ ๅพ—ๅˆฐ็š„็ป“ๆžœๅช่ƒฝๆ˜ฏๆฏ›็—…ๅคšๅคš้šพไปฅไฝฟ็”จ็š„่ฝฏไปถใ€‚\n\n้€š่ฟ‡ๅฐ†ๆœฌไนฆ้‡Œ็š„ๆฏไธ€ไธชไพ‹ๅญไธ€ๅญ—ไธๅทฎๅœฐๆ‰“ๅ‡บๆฅ๏ผŒไฝ ๅฐ†้€š่ฟ‡ๅฎž่ทต่ฎญ็ปƒ่‡ชๅทฑ๏ผŒ่ฎฉ่‡ชๅทฑ\\\n้›†ไธญ็ฒพๅŠ›ๅˆฐไฝ ไฝœๅ“็š„็ป†่Š‚ไธŠ้ขใ€‚\n\n\nๅ‘็ŽฐไธๅŒ\n====================\n\n็จ‹ๅบๅ‘˜้•ฟๅนด็ดฏๆœˆ็š„ๅทฅไฝœไผšๅŸนๅ…ปๅ‡บไธ€ไธช้‡่ฆๆŠ€่ƒฝ๏ผŒ้‚ฃๅฐฑๆ˜ฏๅฏนไบŽไธๅŒ็‚น็š„ๅŒบๅˆ†่ƒฝๅŠ›ใ€‚ๆœ‰็ป้ชŒ็š„\\\n็จ‹ๅบๅ‘˜ๆ‹ฟ็€ไธคไปฝไป…ๆœ‰็ป†ๅพฎไธๅŒ็š„็จ‹ๅบ๏ผŒๅฏไปฅ็ซ‹ๅณๆŒ‡ๅ‡บ้‡Œ่พน็š„ไธๅŒ็‚นๆฅใ€‚็จ‹ๅบๅ‘˜็”š่‡ณ้€ ๅ‡บๅทฅๅ…ทๆฅ่ฎฉ\\\n่ฟ™ไปถไบ‹ๆ›ดๅŠ ๅฎนๆ˜“๏ผŒไธ่ฟ‡ๆˆ‘ไปฌไธไผš็”จๅˆฐ่ฟ™ไบ›ๅทฅๅ…ทใ€‚ไฝ ่ฆๅ…ˆ็”จ็ฌจๅŠžๆณ•่ฎญ็ปƒ่‡ชๅทฑ๏ผŒ็ญ‰ไฝ 
ๅ…ทๅค‡ไธ€ไบ›\\\n็›ธๅ…ณ่ƒฝๅŠ›็š„ๆ—ถๅ€™ๆ‰ๅฏไปฅไฝฟ็”จ่ฟ™ไบ›ๅทฅๅ…ทใ€‚\n\nๅœจไฝ ๅš่ฟ™ไบ›็ปƒไน ๅนถไธ”ๆ‰“ๅญ—่ฟ›ๅŽป็š„ๆ—ถๅ€™๏ผŒไฝ ไธ€ๅฎšไผšๅ†™้”™ไธœ่ฅฟใ€‚่ฟ™ๆ˜ฏไธๅฏ้ฟๅ…็š„๏ผŒๅณไฝฟๆœ‰็ป้ชŒ็š„\\\n็จ‹ๅบๅ‘˜ไนŸไผšๅถๅฐ”ๅ†™้”™ใ€‚ไฝ ็š„ไปปๅŠกๆ˜ฏๆŠŠ่‡ชๅทฑๅ†™็š„ไธœ่ฅฟๅ’Œ่ฆๆฑ‚็š„ๆญฃ็กฎ็ญ”ๆกˆๅฏนๆฏ”๏ผŒๆŠŠๆ‰€ๆœ‰็š„ไธๅŒ็‚น\\\n้ƒฝไฟฎๆญฃ่ฟ‡ๆฅใ€‚่ฟ™ๆ ท็š„่ฟ‡็จ‹ๅฏไปฅ่ฎฉไฝ ๅฏนไบŽ็จ‹ๅบ้‡Œ็š„้”™่ฏฏๅ’Œ bug ๆ›ดๅŠ ๆ•ๆ„Ÿใ€‚\n\n\nไธ่ฆๅคๅˆถ็ฒ˜่ดด\n=================\n\nไฝ ๅฟ…้กปๆ‰‹ๅŠจๅฐ†ๆฏไธช็ปƒไน ๆ‰“ๅ‡บๆฅใ€‚ๅคๅˆถ็ฒ˜่ดดไผš่ฎฉ่ฟ™ไบ›็ปƒไน ๅ˜ๅพ—ๆฏซๆ— ๆ„ไน‰ใ€‚\\\n่ฟ™ไบ›ไน ้ข˜็š„็›ฎ็š„ๆ˜ฏ่ฎญ็ปƒไฝ ็š„ๅŒๆ‰‹ๅ’Œๅคง่„‘ๆ€็ปด๏ผŒ่ฎฉไฝ ๆœ‰่ƒฝๅŠ›่ฏปไปฃ็ ใ€ๅ†™ไปฃ็ ใ€่ง‚ๅฏŸไปฃ็ ใ€‚\\\nๅฆ‚ๆžœไฝ ๅคๅˆถ็ฒ˜่ดด็š„่ฏ๏ผŒ้‚ฃไฝ ๅฐฑๆ˜ฏๅœจๆฌบ้ช—่‡ชๅทฑ๏ผŒ่€Œไธ”่ฟ™ไบ›็ปƒไน ็š„ๆ•ˆๆžœไนŸๅฐ†ๅคงๆ‰“ๆŠ˜ๆ‰ฃใ€‚\n\n\nๅฏนไบŽๅšๆŒ็ปƒไน ็š„ไธ€็‚นๆ็คบ\n==================================\n\nๅœจไฝ ้€š่ฟ‡่ฟ™ๆœฌไนฆๅญฆไน ็ผ–็จ‹ๆ—ถ๏ผŒๆˆ‘ๆญฃๅœจๅญฆไน ๅผนๅ‰ไป–ใ€‚ๆˆ‘ๆฏๅคฉ่‡ณๅฐ‘่ฎญ็ปƒ 2 ๅฐๆ—ถ๏ผŒ่‡ณๅฐ‘่Šฑไธ€ไธชๅฐๆ—ถ\\\n็ปƒไน ้Ÿณ้˜ถใ€ๅ’Œๅฃฐใ€ๅ’Œ็ถ้Ÿณ๏ผŒๅ‰ฉไธ‹็š„ๆ—ถ้—ด็”จๆฅๅญฆไน ้Ÿณไน็†่ฎบๅ’ŒๆญŒๆ›ฒๆผ”ๅฅไปฅๅŠ่ฎญ็ปƒๅฌๅŠ›็ญ‰ใ€‚ๆœ‰ๆ—ถๆˆ‘\\\nไธ€ๅคฉไผš่Šฑ 8 ไธชๅฐๆ—ถๆฅ็ปƒไน ๏ผŒๅ› ไธบๆˆ‘่ง‰ๅพ—่ฟ™ๆ˜ฏไธ€ไปถๆœ‰่ถฃ็š„ไบ‹ๆƒ…ใ€‚ๅฏนๆˆ‘ๆฅ่ฏด๏ผŒ่ฆๅญฆๅฅฝไธ€ๆ ทไธœ่ฅฟ๏ผŒๆฏๅคฉ\\\n็š„็ปƒไน ๆ˜ฏๅฟ…ไธๅฏๅฐ‘็š„ใ€‚ๅฐฑ็ฎ—่ฟ™ๅคฉไธชไบบ็Šถๆ€ๅพˆๅทฎ๏ผŒๆˆ–่€…่ฏดๅญฆไน ็š„่ฏพ้ข˜ๅฎžๅœจๅคช้šพ๏ผŒไฝ ไนŸไธๅฟ…ไป‹ๆ„๏ผŒ\\ \nๅช่ฆๅšๆŒๅฐ่ฏ•๏ผŒๆ€ปๆœ‰ไธ€ๅคฉๅ›ฐ้šพไผšๅ˜ๅพ—ๅฎนๆ˜“๏ผŒๆžฏ็‡ฅไนŸไผšๅ˜ๅพ—ๆœ‰่ถฃไบ†ใ€‚\n\nๅœจไฝ ้€š่ฟ‡่ฟ™ๆœฌไนฆๅญฆไน ็ผ–็จ‹็š„่ฟ‡็จ‹ไธญ่ฆ่ฎฐไฝไธ€็‚น๏ผŒๅฐฑๆ˜ฏๆ‰€่ฐ“็š„โ€œไธ‡ไบ‹ๅผ€ๅคด้šพโ€๏ผŒๅฏนไบŽๆœ‰ไปทๅ€ผ็š„ไบ‹ๆƒ…ๅฐคๅ…ถ\\\nๅฆ‚ๆญคใ€‚ไนŸ่ฎธไฝ ๆ˜ฏไธ€ไธชๅฎณๆ€•ๅคฑ่ดฅ็š„ไบบ๏ผŒไธ€็ขฐๅˆฐๅ›ฐ้šพๅฐฑๆƒณๆ”พๅผƒใ€‚ไนŸ่ฎธไฝ ๆ˜ฏไธ€ไธช็ผบไน่‡ชๅพ‹็š„ไบบ๏ผŒไธ€\\\n็ขฐๅˆฐโ€œๆ— ่Šโ€็š„ไบ‹ๆƒ…ๅฐฑไธๆƒณไธŠๆ‰‹ใ€‚ไนŸ่ฎธๅ› ไธบๆœ‰ไบบๅคธไฝ โ€œๆœ‰ๅคฉๅˆ†โ€่€Œ่ฎฉไฝ ่‡ช่ง†็”š้ซ˜๏ผŒไธๆ„ฟๆ„ๅš่ฟ™ไบ›\\\n็œ‹ไธŠๅŽปๅพˆ็ฌจๆ‹™็š„ไบ‹ๆƒ…๏ผŒๆ€•ๆœ‰่ดŸไฝ \"็ฅž็ซฅ\"็š„็งฐๅทใ€‚ไนŸ่ฎธไฝ 
ๅคช่ฟ‡ๆฟ€่ฟ›๏ผŒๆŠŠ่‡ชๅทฑ่ทŸๆœ‰20ๅคšๅนด็ป้ชŒ็š„\\\n็ผ–็จ‹่€ๆ‰‹็›ธๆฏ”๏ผŒ่ฎฉ่‡ชๅทฑๅคฑๅŽปไบ†ไฟกๅฟƒใ€‚\n\nไธ็ฎกๆ˜ฏไป€ไนˆๅŽŸๅ› ๏ผŒไฝ ไธ€ๅฎš่ฆๅšๆŒไธ‹ๅŽปใ€‚ๅฆ‚ๆžœไฝ ็ขฐๅˆฐๅšไธๅ‡บๆฅ็š„ๅŠ ๅˆ†ไน ้ข˜๏ผŒๆˆ–่€…็ขฐๅˆฐไธ€่Š‚\\\n็œ‹ไธๆ‡‚็š„ไน ้ข˜๏ผŒไฝ ๅฏไปฅๆš‚ๆ—ถ่ทณ่ฟ‡ๅŽป๏ผŒ่ฟ‡ไธ€้˜ตๅญๅ›žๆฅๅ†็œ‹ใ€‚ๅช่ฆๅšๆŒไธ‹ๅŽป๏ผŒไฝ ๆ€ปไผšๅผ„ๆ‡‚็š„ใ€‚\n\nไธ€ๅผ€ๅง‹ไฝ ๅฏ่ƒฝไป€ไนˆ้ƒฝ็œ‹ไธๆ‡‚ใ€‚่ฟ™ไผš่ฎฉไฝ ๆ„Ÿ่ง‰ๅพˆไธ่ˆ’ๆœ๏ผŒๅฐฑๅƒๅญฆไน ไบบ็ฑป็š„่‡ช็„ถ่ฏญ่จ€ไธ€ๆ ทใ€‚ไฝ \\\nไผšๅ‘็Žฐๅพˆ้šพ่ฎฐไฝไธ€ไบ›ๅ•่ฏๅ’Œ็‰นๆฎŠ็ฌฆๅท็š„็”จๆณ•๏ผŒ่€Œไธ”ไผš็ปๅธธๆ„Ÿๅˆฐๅพˆ่ฟท่Œซ๏ผŒ็›ดๅˆฐๆœ‰ไธ€ๅคฉ๏ผŒๅฟฝ็„ถ\\\nไธ€ไธ‹ๅญไฝ ไผš่ง‰ๅพ—่ฑ็„ถๅผ€ๆœ—๏ผŒไปฅๅ‰ไธๆ˜Ž็™ฝ็š„ไธœ่ฅฟๅฟฝ็„ถๅฐฑๆ˜Ž็™ฝไบ†ใ€‚ๅฆ‚ๆžœไฝ ๅšๆŒ็ปƒไน ไธ‹ๅŽป๏ผŒๅšๆŒ\\\nๅŽปไธŠไธ‹ๆฑ‚็ดข๏ผŒไฝ ๆœ€็ปˆไผšๅญฆไผš่ฟ™ไบ›ไธœ่ฅฟ็š„ใ€‚ไนŸ่ฎธไฝ ไธไผšๆˆไธบไธ€ไธช็ผ–็จ‹ๅคงๅธˆ๏ผŒไฝ†ไฝ ่‡ณๅฐ‘ไผšๆ˜Ž็™ฝ\\\n็จ‹ๅบๆ˜ฏๆ€Žไนˆๅทฅไฝœ็š„ใ€‚\n\nๅฆ‚ๆžœไฝ ๆ”พๅผƒ็š„่ฏ๏ผŒไฝ ไผšๅคฑๅŽป่พพๅˆฐ่ฟ™ไธช็จ‹ๅบฆ็š„ๆœบไผšใ€‚ไฝ ไผšๅœจ็ฌฌไธ€ๆฌก็ขฐๅˆฐไธๆ˜Ž็™ฝ็š„ไธœ่ฅฟๆ—ถ(ๅ‡ ไนŽ\\\nๆ˜ฏๆ‰€ๆœ‰็š„ไธœ่ฅฟ)ๆ”พๅผƒใ€‚ๅฆ‚ๆžœไฝ ๅšๆŒๅฐ่ฏ•๏ผŒๅšๆŒๅ†™ไน ้ข˜๏ผŒๅšๆŒๅฐ่ฏ•ๅผ„ๆ‡‚ไน ้ข˜็š„่ฏ๏ผŒไฝ ๆœ€็ปˆไธ€ๅฎšไผš\\\nๆ˜Ž็™ฝ้‡Œ่พน็š„ๅ†…ๅฎน็š„ใ€‚\n\nๅฆ‚ๆžœไฝ ้€š่ฏปไบ†่ฟ™ๆœฌไนฆ๏ผŒๅด่ฟ˜ๆ˜ฏไธ็Ÿฅ้“็ผ–็จ‹ๆ˜ฏๆ€Žไนˆๅ›žไบ‹ใ€‚้‚ฃไนŸๆฒกๅ…ณ็ณป๏ผŒ่‡ณๅฐ‘ไฝ ๅฐ่ฏ•่ฟ‡ไบ†ใ€‚ไฝ \\\nๅฏไปฅ่ฏดไฝ ๅทฒ็ปๅฐฝ่ฟ‡ๅŠ›ไฝ†ๆˆๆ•ˆไธไฝณ๏ผŒไฝ†่‡ณๅฐ‘ไฝ ๅฐ่ฏ•่ฟ‡ไบ†ใ€‚่ฟ™ไนŸๆ˜ฏไธ€ไปถๅ€ผๅพ—ไฝ ้ช„ๅ‚ฒ็š„ไบ‹ๆƒ…ใ€‚\n\n\n่ฎธๅฏๅ่ฎฎ\n==========\n\nไฝ ๅฏไปฅๅœจไธๆ”ถๅ–ไปปไฝ•่ดน็”จ๏ผŒ่€Œไธ”ไธไฟฎๆ”นไปปไฝ•ๅ†…ๅฎน็š„ๅ‰ๆไธ‹่‡ช็”ฑๅˆ†ๅ‘่ฟ™ๆœฌไนฆ็ป™ไปปไฝ•ไบบใ€‚ไฝ†ๆ˜ฏ\\\nๆœฌไนฆ็š„ๅ†…ๅฎนๅชๅ…่ฎธๅฎŒๆ•ดๅŽŸๅฐไธๅŠจๅœฐ่ฟ›่กŒๅˆ†ๅ‘ๅ’Œไผ ๆ’ญใ€‚\n\nCopyright (C) 2010 by Zed A. Shaw. 
\n" }, { "alpha_fraction": 0.6412929892539978, "alphanum_fraction": 0.6600625514984131, "avg_line_length": 18.9375, "blob_id": "1935090cd9ab6bd16fe4fefb6025f7d7c7a2e764", "content_id": "30b2bde4053b6576ffc3426d849fada3a20f545d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1991, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/cn/ex4.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 4: ๅ˜้‡(variable)ๅ’Œๅ‘ฝๅ\n*******************************\n \nไฝ ๅทฒ็ปๅญฆไผšไบ† ``print`` ๅ’Œ็ฎ—ๆœฏ่ฟ็ฎ—ใ€‚ไธ‹ไธ€ๆญฅไฝ ่ฆๅญฆ็š„ๆ˜ฏโ€œๅ˜้‡โ€ใ€‚ๅœจ็ผ–็จ‹ไธญ๏ผŒๅ˜้‡ๅชไธ่ฟ‡\\\nๆ˜ฏ็”จๆฅๆŒ‡ไปฃๆŸไธชไธœ่ฅฟ็š„ๅๅญ—ใ€‚็จ‹ๅบๅ‘˜้€š่ฟ‡ไฝฟ็”จๅ˜้‡ๅๅฏไปฅ่ฎฉไป–ไปฌ็š„็จ‹ๅบ่ฏป่ตทๆฅๆ›ดๅƒ่‹ฑ่ฏญใ€‚\\\n่€Œไธ”ๅ› ไธบ็จ‹ๅบๅ‘˜็š„่ฎฐๆ€ง้ƒฝไธๆ€Žไนˆๅœฐ๏ผŒๅ˜้‡ๅๅฏไปฅ่ฎฉไป–ไปฌๆ›ดๅฎนๆ˜“่ฎฐไฝ็จ‹ๅบ็š„ๅ†…ๅฎนใ€‚ๅฆ‚ๆžœไป–ไปฌ\\\nๆฒกๆœ‰ๅœจๅ†™็จ‹ๅบๆ—ถไฝฟ็”จๅฅฝ็š„ๅ˜้‡ๅ๏ผŒๅœจไธ‹ไธ€ๆฌก่ฏปๅˆฐๅŽŸๆฅๅ†™็š„ไปฃ็ ๆ—ถไป–ไปฌไผšๅคงไธบๅคด็–ผ็š„ใ€‚\n\nๅฆ‚ๆžœไฝ ่ขซ่ฟ™็ซ ไน ้ข˜้šพไฝไบ†็š„่ฏ๏ผŒ่ฎฐๅพ—ๆˆ‘ไปฌไน‹ๅ‰ๆ•™่ฟ‡็š„๏ผšๆ‰พๅˆฐไธๅŒ็‚นใ€ๆณจๆ„็ป†่Š‚ใ€‚\n\n1. ๅœจๆฏไธ€่กŒ็š„ไธŠ้ขๅ†™ไธ€่กŒๆณจ่งฃ๏ผŒ็ป™่‡ชๅทฑ่งฃ้‡Šไธ€ไธ‹่ฟ™ไธ€่กŒ็š„ไฝœ็”จใ€‚ \n2. ๅ€’็€่ฏปไฝ ็š„ ``.py`` ๆ–‡ไปถใ€‚\n3. ๆœ—่ฏปไฝ ็š„ ``.py`` ๆ–‡ไปถ๏ผŒๅฐ†ๆฏไธชๅญ—็ฌฆไนŸๆœ—่ฏปๅ‡บๆฅใ€‚ \n\n\n.. literalinclude:: ex/ex4.py\n :linenos:\n\n.. note::\n\n ``space_in_a_car`` ไธญ็š„ ``_`` ๆ˜ฏ ``ไธ‹ๅˆ’็บฟ(underscore)`` ๅญ—็ฌฆใ€‚ไฝ ่ฆ่‡ชๅทฑๅญฆไผš\\\n ๆ€Žๆ ทๆ‰“ๅ‡บ่ฟ™ไธชๅญ—็ฌฆๆฅใ€‚่ฟ™ไธช็ฌฆๅทๅœจๅ˜้‡้‡Œ้€šๅธธ่ขซ็”จไฝœๅ‡ๆƒณ็š„็ฉบๆ ผ๏ผŒ็”จๆฅ้š”ๅผ€ๅ•่ฏใ€‚ \n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n\n.. literalinclude:: ex/ex4.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\nๅฝ“ๆˆ‘ๅˆšๅผ€ๅง‹ๅ†™่ฟ™ไธช็จ‹ๅบๆ—ถๆˆ‘็Šฏไบ†ไธช้”™่ฏฏ๏ผŒpython ๅ‘Š่ฏ‰ๆˆ‘่ฟ™ๆ ท็š„้”™่ฏฏไฟกๆฏ๏ผš\n\n.. 
literalinclude:: ex/ex4.err\n\n็”จไฝ ่‡ชๅทฑ็š„่ฏ่งฃ้‡Šไธ€ไธ‹่ฟ™ไธช้”™่ฏฏไฟกๆฏ๏ผŒ่งฃ้‡Šๆ—ถ่ฎฐๅพ—ไฝฟ็”จ่กŒๅท๏ผŒ่€Œไธ”่ฆ่ฏดๆ˜ŽๅŽŸๅ› ใ€‚\n\nๆ›ดๅคš็š„ๅŠ ๅˆ†ไน ้ข˜:\n\n1. ่งฃ้‡Šไธ€ไธ‹ไธบไป€ไนˆ็จ‹ๅบ้‡Œ็”จไบ† 4.0 ่€Œไธๆ˜ฏ 4ใ€‚\n2. ่ฎฐไฝ 4.0 ๆ˜ฏไธ€ไธชโ€œๆตฎ็‚นๆ•ฐโ€๏ผŒ่‡ชๅทฑ็ ”็ฉถไธ€ไธ‹่ฟ™ๆ˜ฏไป€ไนˆๆ„ๆ€ใ€‚\n3. ๅœจๆฏไธ€ไธชๅ˜้‡่ต‹ๅ€ผ็š„ไธŠไธ€่กŒๅŠ ไธŠไธ€่กŒๆณจ่งฃใ€‚\n4. ่ฎฐไฝ ``=`` ็š„ๅๅญ—ๆ˜ฏ็ญ‰ไบŽ(equal)๏ผŒๅฎƒ็š„ไฝœ็”จๆ˜ฏไธบไธœ่ฅฟๅ–ๅใ€‚\n5. ่ฎฐไฝ ``_`` ๆ˜ฏไธ‹ๅˆ’็บฟๅญ—็ฌฆ(underscore)ใ€‚\n6. ๅฐ† ``python`` ไฝœไธบ่ฎก็ฎ—ๅ™จ่ฟ่กŒ่ตทๆฅ๏ผŒๅฐฑ่ทŸไปฅๅ‰ไธ€ๆ ท๏ผŒไธ่ฟ‡่ฟ™ไธ€ๆฌกๅœจ่ฎก็ฎ—่ฟ‡็จ‹ไธญไฝฟ็”จ\\\n ๅ˜้‡ๅๆฅๅš่ฎก็ฎ—๏ผŒๅธธ่ง็š„ๅ˜้‡ๅๆœ‰ ``i``, ``x``, ``j`` ็ญ‰็ญ‰ใ€‚\n\n\n" }, { "alpha_fraction": 0.723047137260437, "alphanum_fraction": 0.7301484942436218, "avg_line_length": 38.64102554321289, "blob_id": "d05d82e3d54fc18534d64b7990e428b14c14b19b", "content_id": "0bc7dc3a68b8325f3f09ee27f50a579f2d31ab14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1549, "license_type": "no_license", "max_line_length": 89, "num_lines": 39, "path": "/ex19.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 19: Functions And Variables\n************************************\n\nFunctions may have been a mind-blowing amount of information, but do not worry.\nJust keep doing these exercises and going through your checklist from the last\nexercise and you will eventually get it.\n\nThere is one tiny point though that you might not have realized which we'll reinforce\nright now: The variables in your function are not connected to the variables\nin your script. Here's an exercise to get you thinking about this:\n\n.. literalinclude:: ex/ex19.py\n :linenos:\n\nThis shows all different ways we're able to give our function ``cheese_and_crackers``\nthe values it needs to print them. We can give it straight numbers. We can\ngive it variables. We can give it math. 
We can even combine math and variables.\n\nIn a way, the arguments to a function are kind of like our ``=`` character\nwhen we make a variable. In fact, if you can use ``=`` to name something, \nyou can usually pass it to a function as an argument.\n\nWhat You Should See\n===================\n\nYou should study the output of this script and compare it with what you\nthink you should get for each of the examples in the script.\n\n.. literalinclude:: ex/ex19.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Go back through the script and type a comment above each line explaining in English\n what it does.\n2. Start at the bottom and read each line backwards, saying all the important characters.\n3. Write at least one more function of your own design, and run it 10 different ways.\n\n\n\n" }, { "alpha_fraction": 0.7134729623794556, "alphanum_fraction": 0.7176105380058289, "avg_line_length": 41.9555549621582, "blob_id": "bec410c938767720b03b53e3a7eeba9420ac9df6", "content_id": "cb8efcc55de06bb2a27505361c531c8308c82bfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3867, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/ex36.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 36: Designing and Debugging\n************************************\n\nNow that you know ``if-statements``, I'm going to give you some rules for\n``for-loops`` and ``while-loops`` that will keep you out of trouble. I'm\nalso going to give you some tips on debugging so that you can figure out\nproblems with your program. Finally, you are going to design a similar\nlittle game as in the last exercise but with a slight twist.\n\n\nRules For If-Statements\n=======================\n\n1. Every ``if-statement`` must have an ``else``.\n2. 
If this ``else`` should never be run because it doesn't\n make sense, then you must use a ``die`` function in the ``else`` that\n prints out an error message and dies, just like we did in\n the last exercise. This will find *many* errors.\n3. Never nest ``if-statements`` more than 2 deep and always try\n to do them 1 deep. This means if you put an ``if`` in an ``if`` \n then you should be looking to move that second ``if`` into\n another function.\n4. Treat ``if-statements`` like paragraphs, where each ``if,elif,else``\n grouping is like a set of sentences. Put blank lines before and\n after.\n5. Your boolean tests should be simple. If they are complex, move \n their calculations to variables earlier in your function and use\n a good name for the variable.\n\nIf you follow these simple rules, you will start writing better code than most\nprogrammers. Go back to the last exercise and see if I followed all of these\nrules. If not, fix it.\n\n.. warning::\n\n Never be a slave to the rules in real life. For training purposes\n you need to follow these rules to make your mind strong, but in \n real life sometimes these rules are just stupid. If you think a \n rule is stupid, try not using it.\n \n\n\nRules For Loops\n===============\n\n1. Use a ``while-loop`` only to loop forever, and that means probably\n never. This only applies to Python, other languages are different.\n2. Use a ``for-loop`` for all other kinds of looping, especially if\n there is a fixed or limited number of things to loop over.\n\n\nTips For Debugging\n==================\n\n1. Do not use a \"debugger\". A debugger is like doing a full-body\n scan on a sick person. You do not get any specific useful information,\n and you find a whole lot of information that doesn't help and is just\n confusing.\n2. The best way to debug a program is to use ``print`` to print\n out the values of variables at points in the program to see\n where they go wrong.\n3. 
Make sure parts of your programs work as you work on them. Do \n not write massive files of code before you try to run them.\n Code a little, run a little, fix a little.\n\n\nHomework\n========\n\nNow write a similar game to the one that I created in the last exercise. It\ncan be any kind of game you want in the same flavor. Spend a week on it making\nit as interesting as possible. For extra credit, use lists, functions, and\nmodules (remember those from Ex. 13?) as much as possible, and find as many new\npieces of Python as you can to make the game work.\n\nThere is one catch though, write up your idea for the game first. Before you\nstart coding you must write up a map for your game. Create the rooms,\nmonsters, and traps that the player must go through on paper before you code.\n\nOnce you have your map, try to code it up. If you find problems with the map\nthen adjust it and make the code match.\n\nOne final word of advice: Every programmer becomes paralyzed by irrational\nfear starting a new large project. They then use procrastination to avoid\nconfronting this fear and end up not getting their program working or even\nstarted. I do this. Everyone does this. 
The best way to avoid this is\nto make a list of things you should do, and then do them one at a time.\n\nJust start doing it, do a small version, make it bigger, \nkeep a list of things to do, and do them.\n\n" }, { "alpha_fraction": 0.714318573474884, "alphanum_fraction": 0.7214433550834656, "avg_line_length": 43.3775520324707, "blob_id": "d3eb9ea81ec3e5e8bdbe45b5d50c1fabe042fd64", "content_id": "42ac6f871b7927a0784a0060f859def2b7a82181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4351, "license_type": "no_license", "max_line_length": 98, "num_lines": 98, "path": "/ex18.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 18: Names, Variables, Code, Functions\n**********************************************\n\nBig title right? I am about to introduce you to *the function*!\nDum dum dah! Every programmer will go on and on about functions\nand all the different ideas about how they work and what they do,\nbut I will give you the simplest explanation you can use right now.\n\nFunctions do three things:\n\n1. They name pieces of code the way variables name strings and numbers.\n2. They take arguments the way your scripts take ``argv``.\n3. Using #1 and #2 they let you make your own \"mini scripts\" or \"tiny commands\".\n\nYou can create a function by using the word ``def`` in Python. I'm going to\nhave you make four different functions that work like your scripts, and then\nshow you how each one is related.\n\n.. literalinclude:: ex/ex18.py\n :linenos:\n\nLet's break down the first function, ``print_two`` which is the most similar\nto what you already know from making scripts:\n\n1. First we tell Python we want to make a function using ``def`` for \"define\".\n2. On the same line as ``def`` we then give the function a name, in this\n case we just called it \"print_two\" but it could be \"peanuts\" too. 
It doesn't matter,\n except that your function should have a short name that says what it does.\n3. Then we tell it we want ``*args`` (asterisk args) which is a lot like your ``argv`` parameter\n but for functions. This *has* to go inside ``()`` parenthesis to work.\n4. Then we end this line with a ``:`` colon, and start indenting.\n5. After the colon all the lines that are indented 4 spaces will become attached \n to this name, ``print_two``. Our first indented line is one that unpacks\n the arguments the same as with your scripts.\n6. To demonstrate how it works we print these arguments out, just like\n we would in a script.\n\nNow, the problem with ``print_two`` is that it's not the easiest way to make\na function. In Python we can skip the whole unpacking args and just use\nthe names we want right inside ``()``. That's what ``print_two_again`` does.\n\nAfter that you have an example of how you make a function that takes one\nargument in ``print_one``.\n\nFinally you have a function that has no arguments in ``print_none``.\n\n.. warning::\n\n This is very important. Do *not* get discouraged right now if this doesn't\n quite make sense. We're going to do a few exercises linking functions to\n your scripts and show you how to make more. For now just keep thinking\n \"mini script\" when I say \"function\" and keep playing with them.\n\n\nWhat You Should See\n===================\n\nIf you run the above script you should see:\n\n.. literalinclude:: ex/ex18.txt\n :language: console\n\nRight away you can see how a function works. Notice that you used your\nfunctions the way you use things like ``exists``, ``open``, and other\n\"commands\". In fact, I've been tricking you because in Python those \"commands\"\nare just functions. This means you can make your own commands and use\nthem in your scripts too.\n\n\nExtra Credit\n============\n\nWrite out a ``function checklist`` for later exercises. 
Write\nthese on an index card and keep it by you while you complete the rest\nof these exercises or until you feel you do not need it:\n\n1. Did you start your function definition with ``def``?\n2. Does your function name have only characters and ``_`` (underscore) characters?\n3. Did you put an open parenthesis ``(`` right after the function name?\n4. Did you put your arguments after the parenthesis ``(`` separated by commas?\n5. Did you make each argument unique (meaning no duplicated names).\n6. Did you put a close parenthesis and a colon ``):`` after the arguments?\n7. Did you indent all lines of code you want in the function 4 spaces? No more, no less.\n8. Did you \"end\" your function by going back to writing with no indent (``dedenting`` we call it)?\n\nAnd when you run (aka \"use\" or \"call\") a function, check these things:\n\n1. Did you call/use/run this function by typing its name?\n2. Did you put ``(`` character after the name to run it?\n3. Did you put the values you want into the parenthesis separated by commas?\n4. Did you end the function call with a ``)`` character.\n\nUse these two checklists on the remaining lessons until you do not need\nthem anymore.\n\nFinally, repeat this a few times:\n\n\"To 'run', 'call', or 'use' a function all mean the same thing.\"\n\n\n" }, { "alpha_fraction": 0.6938964128494263, "alphanum_fraction": 0.7133169174194336, "avg_line_length": 52.14754104614258, "blob_id": "adbd3f757ce1d10897a913969368a890882db5b4", "content_id": "0f280781f795df5d9b1743ad856268ff3666c3a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3244, "license_type": "no_license", "max_line_length": 240, "num_lines": 61, "path": "/ex25.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 25: Even More Practice\n*******************************\n\nWe're going to do some more practice involving functions and variables to make\nsure you know them well. 
This exercise should be straight forward for you to\ntype in, break down, and understand.\n\nHowever, this exercise is a little different. You won't be running it.\nInstead *you* will import it into your python and run the functions yourself.\n\n.. literalinclude:: ex/ex25.py\n :linenos:\n\nFirst, run this like normal with ``python ex25.py`` to find any errors you have\nmade. Once you have found all of the errors you can and fixed them, you will\nthen want to follow the WYSS section to complete the exercise.\n\n\nWhat You Should See\n===================\n\nIn this exercise we're going to interact with your ``.py`` file inside\nthe ``python`` interpreter you used periodically to do calculations.\n\nHere's what it looks like when I do it:\n\n.. literalinclude:: ex/ex25.txt\n :linenos:\n :language: pycon\n\nLet's break this down line by line to make sure you know what's going \non:\n\n* Line 5 you import *your* ``ex25.py`` python file, just like other imports you have done. Notice you do not need to put the ``.py`` at the end to import it. When you do this you make a ``module`` that has all your functions in it to use.\n* Line 6 you made a ``sentence`` to work with.\n* Line 7 you use the ``ex25`` module and call your first function ``ex25.break_words``. The ``.`` (dot, period) symbol is how you tell python, \"Hey, inside ``ex25`` there's a function called ``break_words`` and I want to run it.\"\n* Line 8 we just type ``words``, and python will print out what's in that variable (line 9). It looks weird but this is a ``list`` which you will learn about later.\n* Lines 10-11 we do the same thing with ``ex25.sort_words`` to get a sorted sentence.\n* Lines 13-16 we use ``ex25.print_first_word`` and ``ex25.print_last_word`` to get the first and last word printed out.\n* Line 17 is interesting. I made a mistake and typed the ``words`` variable as ``wrods`` so python gave me an error on Lines 18-20.\n* Line 21-22 is where we print the modified words list. 
Notice that since we printed the first and last one, those words are now missing.\n\nThe remaining lines are for you to figure out and analyze in the extra credit.\n\n\nExtra Credit\n============\n\n1. Take the remaining lines of the WYSS output and figure out what they are doing.\n Make sure you understand how you are running your functions in the ``ex25``\n module.\n2. Try doing this: ``help(ex25)`` and also ``help(ex25.break_words)``. Notice how\n you get help for your module, and how the help is those odd ``\"\"\"``\n strings you put after each function in ex25? Those special strings are called\n ``documentation comments`` and we'll be seeing more of them.\n3. Typing ``ex25.`` is annoying. A shortcut is to do your import like \n this: ``from ex25 import *`` which is like saying, \"Import everything\n from ex25.\" Programmers like saying things backwards. Start a new \n session and see how all your functions are right there.\n4. Try breaking your file and see what it looks like in ``python`` when you use it.\n You will have to quit python with CTRL-D (CTRL-Z on windows) to be able to reload it.\n\n\n" }, { "alpha_fraction": 0.6806842684745789, "alphanum_fraction": 0.6910192370414734, "avg_line_length": 22.190082550048828, "blob_id": "72945e2dbd91e3a0b12a91476f5e257fac911800", "content_id": "161f8ccf9110a71eefb0b2d1a3e2b1ba37f54694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5510, "license_type": "no_license", "max_line_length": 74, "num_lines": 121, "path": "/cn/ex46.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 46: ไธ€ไธช้กน็›ฎ้ชจๆžถ\n*******************************\n\n่ฟ™้‡Œไฝ ๅฐ†ๅญฆไผšๅฆ‚ไฝ•ๅปบ็ซ‹ไธ€ไธช้กน็›ฎโ€œ้ชจๆžถโ€็›ฎๅฝ•ใ€‚่ฟ™ไธช้ชจๆžถ็›ฎๅฝ•ๅ…ทๅค‡่ฎฉ้กน็›ฎ่ท‘่ตทๆฅ็š„ๆ‰€ๆœ‰ๅŸบๆœฌ\\\nๅ†…ๅฎนใ€‚ๅฎƒ้‡Œ่พนไผšๅŒ…ๅซไฝ ็š„้กน็›ฎๆ–‡ไปถๅธƒๅฑ€ใ€่‡ชๅŠจๅŒ–ๆต‹่ฏ•ไปฃ็ ๏ผŒๆจก็ป„๏ผŒไปฅๅŠๅฎ‰่ฃ…่„šๆœฌใ€‚ๅฝ“ไฝ 
ๅปบ็ซ‹\\\nไธ€ไธชๆ–ฐ้กน็›ฎ็š„ๆ—ถๅ€™๏ผŒๅช่ฆๆŠŠ่ฟ™ไธช็›ฎๅฝ•ๅคๅˆถ่ฟ‡ๅŽป๏ผŒๆ”นๆ”น็›ฎๅฝ•็š„ๅๅญ—๏ผŒๅ†็ผ–่พ‘้‡Œ่พน็š„ๆ–‡ไปถๅฐฑ่กŒไบ†ใ€‚\n\n้ชจๆžถๅ†…ๅฎน: Linux/OSX\n============================\n\n้ฆ–ๅ…ˆไฝฟ็”จไธ‹่ฟฐๅ‘ฝไปคๅˆ›ๅปบไฝ ็š„้ชจๆžถ็›ฎๅฝ•๏ผš\n\n.. code-block:: bash\n\n ~ $ mkdir -p projects\n ~ $ cd projects/\n ~/projects $ mkdir skeleton\n ~/projects $ cd skeleton\n ~/projects/skeleton $ mkdir bin NAME tests docs\n\nๆˆ‘ไฝฟ็”จไบ†ไธ€ไธชๅซ ``projects`` ็š„็›ฎๅฝ•๏ผŒ็”จๆฅๅญ˜ๆ”พๆˆ‘่‡ชๅทฑ็š„ๅ„ไธช้กน็›ฎใ€‚็„ถๅŽๆˆ‘ๅœจ้‡Œ่พน\\\nๅปบ็ซ‹ไบ†ไธ€ไธชๅซๅš ``skeleton`` ็š„ๆ–‡ไปถๅคน๏ผŒ่ฟ™ๅฐฑๆ˜ฏๆˆ‘ไปฌๆ–ฐ้กน็›ฎ็š„ๅŸบ็ก€็›ฎๅฝ•ใ€‚ๅ…ถไธญๅซๅš\\\n``NAME`` ็š„ๆ–‡ไปถๅคนๆ˜ฏไฝ ็š„้กน็›ฎ็š„ไธปๆ–‡ไปถๅคน๏ผŒไฝ ๅฏไปฅๅฐ†ๅฎƒไปปๆ„ๅ–ๅใ€‚\n\nๆŽฅไธ‹ๆฅๆˆ‘ไปฌ่ฆ้…็ฝฎไธ€ไบ›ๅˆๅง‹ๆ–‡ไปถ๏ผš\n\n.. code-block:: bash\n\n ~/projects/skeleton $ touch NAME/__init__.py\n ~/projects/skeleton $ touch tests/__init__.py\n\nไปฅไธŠๅ‘ฝไปคไธบไฝ ๅˆ›ๅปบไบ†็ฉบ็š„ๆจก็ป„็›ฎๅฝ•๏ผŒไปฅไพ›ไฝ ๅŽ้ขไธบๅ…ถๆทปๅŠ ไปฃ็ ใ€‚็„ถๅŽๆˆ‘ไปฌ้œ€่ฆ\\\nๅปบ็ซ‹ไธ€ไธช ``setup.py`` ๆ–‡ไปถ๏ผŒ่ฟ™ไธชๆ–‡ไปถๅœจๅฎ‰่ฃ…้กน็›ฎ็š„ๆ—ถๅ€™ๆˆ‘ไปฌไผš็”จๅˆฐๅฎƒ๏ผš\n\n.. literalinclude:: ex/ex46.py\n :linenos:\n\n็ผ–่พ‘่ฟ™ไธชๆ–‡ไปถ๏ผŒๆŠŠ่‡ชๅทฑ็š„่”็ณปๆ–นๅผๅ†™่ฟ›ๅŽป๏ผŒ็„ถๅŽๆ”พๅˆฐ้‚ฃ้‡Œๅฐฑ่กŒไบ†ใ€‚\n\nๆœ€ๅŽไฝ ้œ€่ฆไธ€ไธช็ฎ€ๅ•็š„ๆต‹่ฏ•ไธ“็”จ็š„้ชจๆžถๆ–‡ไปถๅซ ``tests/NAME_tests.py``\\๏ผš\n\n.. 
literalinclude:: ex/ex46_tests.py\n :linenos:\n\n\nPython ่ฝฏไปถๅŒ…็š„ๅฎ‰่ฃ…\n--------------------------\n\nไฝ ้œ€่ฆ้ข„ๅ…ˆๅฎ‰่ฃ…ไธ€ไบ›่ฝฏไปถๅŒ…๏ผŒไธ่ฟ‡้—ฎ้ข˜ๅฐฑๆฅไบ†ใ€‚ๆˆ‘็š„ๆœฌๆ„ๆ˜ฏ่ฎฉ่ฟ™ๆœฌไนฆ่ถŠๆธ…ๆ™ฐ่ถŠๅนฒๅ‡€่ถŠๅฅฝ๏ผŒ\\\nไธ่ฟ‡ๅฎ‰่ฃ…่ฝฏไปถ็š„ๆ–นๆณ•ๆ˜ฏๅœจๆ˜ฏๅคชๅคšไบ†๏ผŒๅฆ‚ๆžœๆˆ‘่ฆไธ€ๆญฅไธ€ๆญฅๅ†™ไธ‹ๆฅ๏ผŒ้‚ฃ 10 ้กต้ƒฝๅ†™ไธๅฎŒ๏ผŒ่€Œไธ”\\\nๅ‘Š่ฏ‰ไฝ ๅง๏ผŒๆˆ‘ๆœฌๆฅๅฐฑๆ˜ฏไธชๆ‡’ไบบใ€‚\n\nๆ‰€ไปฅๆˆ‘ไธไผšๆไพ›่ฏฆ็ป†็š„ๅฎ‰่ฃ…ๆญฅ้ชคไบ†๏ผŒๆˆ‘ๅชไผšๅ‘Š่ฏ‰ไฝ ้œ€่ฆๅฎ‰่ฃ…ๅ“ชไบ›ไธœ่ฅฟ๏ผŒ็„ถๅŽ่ฎฉไฝ ่‡ชๅทฑๆžๅฎšใ€‚\\\n่ฟ™ๅฏนไฝ ไนŸๆœ‰ๅฅฝๅค„๏ผŒๅ› ไธบไฝ ๅฐ†ๆ‰“ๅผ€ไธ€ไธชๅ…จๆ–ฐ็š„ไธ–็•Œ๏ผŒ้‡Œ่พนๅ……ๆปกไบ†ๅ…ถไป–ไบบๅ‘ๅธƒ็š„ Python ่ฝฏไปถใ€‚\n\nๆŽฅไธ‹ๆฅไฝ ้œ€่ฆๅฎ‰่ฃ…ไธ‹้ข็š„่ฝฏไปถๅŒ…๏ผš\n\n1. pip -- http://pypi.python.org/pypi/pip\n2. distribute -- http://pypi.python.org/pypi/distribute\n3. nose -- http://pypi.python.org/pypi/nose/\n4. virtualenv -- http://pypi.python.org/pypi/virtualenv\n\nไธ่ฆๅชๆ˜ฏๆ‰‹ๅŠจไธ‹่ฝฝๅนถไธ”ๅฎ‰่ฃ…่ฟ™ไบ›่ฝฏไปถๅŒ…๏ผŒไฝ ๅบ”่ฏฅ็œ‹ไธ€ไธ‹ๅˆซไบบ็š„ๅปบ่ฎฎ๏ผŒๅฐคๅ…ถ็œ‹็œ‹้’ˆๅฏนไฝ ็š„ๆ“ไฝœ\\\n็ณป็ปŸๅˆซไบบๆ˜ฏๆ€Žๆ ทๅปบ่ฎฎไฝ ๅฎ‰่ฃ…ๅ’Œไฝฟ็”จ็š„ใ€‚ๅŒๆ ท็š„่ฝฏไปถๅŒ…ๅœจไธไธ€ๆ ท็š„ๆ“ไฝœ็ณป็ปŸไธŠ้ข็š„ๅฎ‰่ฃ…ๆ–นๅผๆ˜ฏ\\\nไธไธ€ๆ ท็š„๏ผŒไธไธ€ๆ ท็‰ˆๆœฌ็š„ Linux ๅ’Œ OSX ไผšๆœ‰ไธๅŒ๏ผŒ่€Œ Windows ๆ›ดๆ˜ฏไธๅŒใ€‚\n\nๆˆ‘่ฆ้ข„ๅ…ˆ่ญฆๅ‘Šไฝ ๏ผŒ่ฟ™ไธช่ฟ‡็จ‹ไผšๆ˜ฏ็›ธๅฝ“ๆ— ่ถฃใ€‚ๅœจไธšๅ†…ๆˆ‘ไปฌๅฐ†่ฟ™็งไบ‹ๆƒ…ๅซๅš \"yak shaving(ๅ‰ƒ็‰ฆ็‰›)\"ใ€‚\\\nๅฎƒๆŒ‡็š„ๆ˜ฏๅœจไฝ ๅšไธ€ไปถๆœ‰ๆ„ไน‰็š„ไบ‹ๆƒ…ไน‹ๅ‰็š„ไธ€ไบ›ๅ‡†ๅค‡ๅทฅไฝœ๏ผŒ่€Œ่ฟ™ไบ›ๅ‡†ๅค‡ๅทฅไฝœๅˆๆ˜ฏๅŠๅ…ถๆ— ่Šๅ†—็น็š„ใ€‚\\\nไฝ ่ฆๅšไธ€ไธชๅพˆ้…ท็š„ Python ้กน็›ฎ๏ผŒไฝ†ๆ˜ฏๅˆ›ๅปบ้ชจๆžถ็›ฎๅฝ•้œ€่ฆไฝ ๅฎ‰่ฃ…ไธ€ไบ›่ฝฏไปถๅŒ…๏ผŒ่€Œๅฎ‰่ฃ…่ฝฏไปถๅŒ…ไน‹ๅ‰\\\nไฝ ่ฟ˜่ฆๅฎ‰่ฃ… package installer (่ฝฏไปถๅŒ…ๅฎ‰่ฃ…ๅทฅๅ…ท)๏ผŒ่€Œ่ฆๅฎ‰่ฃ…่ฟ™ไธชๅทฅๅ…ทไฝ ่ฟ˜ๅพ—ๅ…ˆๅญฆไผšๅฆ‚ไฝ•ๅœจ\\\nไฝ ็š„ๆ“ไฝœ็ณป็ปŸไธ‹ๅฎ‰่ฃ…่ฝฏไปถ๏ผŒ็œŸๆ˜ฏ็ƒฆไธ่ƒœ็ƒฆๅ‘€ใ€‚\n\nๆ— ่ฎบๅฆ‚ไฝ•๏ผŒ่ฟ˜ๆ˜ฏๅ…‹ๆœๅ›ฐ้šพๆŠŠใ€‚ไฝ 
ๅฐฑๆŠŠๅฎƒๅฝ“ๅš่ฟ›ๅ…ฅ็ผ–็จ‹ไฟฑไน้ƒจ็š„ไธ€ไธช่€ƒ้ชŒใ€‚ๆฏไธช็จ‹ๅบๅ‘˜้ƒฝไผš็ปๅކ่ฟ™\\\nๆก้“่ทฏ๏ผŒๅœจๆฏไธ€ๆฎตโ€œ้…ทโ€็š„่ƒŒๅŽๆ€ปไผšๆœ‰ไธ€ๆฎตโ€œ็ƒฆโ€็š„ใ€‚\n\nๆต‹่ฏ•ไฝ ็š„้…็ฝฎ\n==================\n\nๅฎ‰่ฃ…ไบ†ๆ‰€ๆœ‰ไธŠ้ข็š„่ฝฏไปถๅŒ…ไปฅๅŽ๏ผŒไฝ ๅฐฑๅฏไปฅๅšไธ‹้ข็š„ไบ‹ๆƒ…ไบ†๏ผš\n\n.. code-block:: bash\n\n ~/projects/skeleton $ nosetests\n .\n ----------------------------------------------------------------------\n Ran 1 test in 0.007s\n\n OK\n\nไธ‹ไธ€่Š‚็ปƒไน ไธญๆˆ‘ไผšๅ‘Š่ฏ‰ไฝ  ``nosetests`` ็š„ๅŠŸ่ƒฝ๏ผŒไธ่ฟ‡ๅฆ‚ๆžœไฝ ๆฒกๆœ‰็œ‹ๅˆฐไธŠ้ข็š„็”ป้ข๏ผŒ\\\n้‚ฃๅฐฑ่ฏดๆ˜Žไฝ ๅ“ช้‡Œๅ‡บ้”™ไบ†ใ€‚็กฎ่ฎคไธ€ไธ‹ไฝ ็š„ ``NAME`` ๅ’Œ ``tests`` ็›ฎๅฝ•ไธ‹ๅญ˜ๅœจ ``__init__.py``\\๏ผŒ\nๅนถไธ”ไฝ ๆฒกๆœ‰ๆŠŠ ``tests/NAME_tests.py`` ๅ‘ฝๅ้”™ใ€‚\n\n\nไฝฟ็”จ่ฟ™ไธช้ชจๆžถ\n==================\n\nๅ‰ƒ็‰ฆ็‰›็š„ไบ‹ๆƒ…ๅทฒ็ปๅš็š„ๅทฎไธๅคšไบ†๏ผŒไปฅๅŽๆฏๆฌกไฝ ่ฆๆ–ฐๅปบไธ€ไธช้กน็›ฎๆ—ถ๏ผŒๅช่ฆๅšไธ‹้ข็š„ไบ‹ๆƒ…ๅฐฑๅฏไปฅไบ†๏ผš\n\n1. ๆ‹ท่ด่ฟ™ไปฝ้ชจๆžถ็›ฎๅฝ•๏ผŒๆŠŠๅๅญ—ๆ”นๆˆไฝ ๆ–ฐ้กน็›ฎ็š„ๅๅญ—ใ€‚\n2. ๅ†ๅฐ† NAME ๆจก็ป„ๆ›ดๅไธบไฝ ้œ€่ฆ็š„ๅๅญ—๏ผŒๅฎƒๅฏไปฅๆ˜ฏไฝ ้กน็›ฎ็š„ๅๅญ—๏ผŒๅฝ“็„ถๅˆซ็š„ๅๅญ—ไนŸ่กŒใ€‚\n3. ็ผ–่พ‘ setup.py ่ฎฉๅฎƒๅŒ…ๅซไฝ ๆ–ฐ้กน็›ฎ็š„็›ธๅ…ณไฟกๆฏใ€‚\n4. ้‡ๅ‘ฝๅ ``tests/NAME_tests.py`` ๏ผŒ่ฎฉๅฎƒ็š„ๅๅญ—ๅŒน้…ๅˆฐไฝ ๆจก็ป„็š„ๅๅญ—ใ€‚\n5. ไฝฟ็”จ ``nosetests`` ๆฃ€ๆŸฅๆœ‰ๆ— ้”™่ฏฏใ€‚\n6. ๅผ€ๅง‹ๅ†™ไปฃ็ ๅงใ€‚\n\n\nๅฐๆต‹้ชŒ\n=============\n\n่ฟ™่Š‚็ปƒไน ๆฒกๆœ‰ๅŠ ๅˆ†ไน ้ข˜๏ผŒไธ่ฟ‡้œ€่ฆไฝ ๅšไธ€ไธชๅฐๆต‹้ชŒ๏ผš\n\n1. ๆ‰พๆ–‡ๆกฃ้˜…่ฏป๏ผŒๅญฆไผšไฝฟ็”จไฝ ๅ‰้ขๅฎ‰่ฃ…ไบ†็š„่ฝฏไปถๅŒ…ใ€‚\n2. ้˜…่ฏปๅ…ณไบŽ ``setup.py`` ็š„ๆ–‡ๆกฃ๏ผŒ็œ‹ๅฎƒ้‡Œ่พนๅฏไปฅๅšๅคšๅฐ‘้…็ฝฎใ€‚Python ็š„ๅฎ‰่ฃ…ๅ™จๅนถไธๆ˜ฏไธ€ไธช\\\n ๅฅฝ่ฝฏไปถ๏ผŒๆ‰€ไปฅไฝฟ็”จ่ตทๆฅไนŸ้žๅธธๅฅ‡ๆ€ชใ€‚\n3. ๅˆ›ๅปบไธ€ไธช้กน็›ฎ๏ผŒๅœจๆจก็ป„็›ฎๅฝ•้‡Œๅ†™ไธ€ไบ›ไปฃ็ ๏ผŒๅนถ่ฎฉ่ฟ™ไธชๆจก็ป„ๅฏไปฅ่ฟ่กŒใ€‚\n4. ๅœจ ``bin`` ็›ฎๅฝ•ไธ‹ๆ”พไธ€ไธชๅฏไปฅ่ฟ่กŒ็š„่„šๆœฌ๏ผŒๆ‰พๆๆ–™ๅญฆไน ไธ€ไธ‹ๆ€Žๆ ทๅˆ›ๅปบๅฏไปฅๅœจ็ณป็ปŸไธ‹่ฟ่กŒ็š„\n Python ่„šๆœฌใ€‚\n5. ๅœจไฝ ็š„ ``setup.py`` ไธญๅŠ ๅ…ฅ ``bin`` ่ฟ™ไธช็›ฎๅฝ•๏ผŒ่ฟ™ๆ ทไฝ ๅฎ‰่ฃ…ๆ—ถๅฐฑๅฏไปฅ่ฟžๅฎƒๅฎ‰่ฃ…่ฟ›ๅŽปใ€‚\n6. 
ไฝฟ็”จ ``setup.py`` ๅฎ‰่ฃ…ไฝ ็š„ๆจก็ป„๏ผŒๅนถ็กฎๅฎšๅฎ‰่ฃ…็š„ๆจก็ป„ๅฏไปฅๆญฃๅธธไฝฟ็”จ๏ผŒๆœ€ๅŽไฝฟ็”จ ``pip`` \n ๅฐ†ๅ…ถๅธ่ฝฝใ€‚\n" }, { "alpha_fraction": 0.6598984599113464, "alphanum_fraction": 0.6725888252258301, "avg_line_length": 22.156862258911133, "blob_id": "ff1d7f72004eef1fa45899e44ef3053b0b8d32f4", "content_id": "7af0e9fb80b4e5a8f01409f960b54fa4fc09d986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2446, "license_type": "no_license", "max_line_length": 76, "num_lines": 51, "path": "/cn/ex10.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 10: ้‚ฃๆ˜ฏไป€ไนˆ๏ผŸ\n***************************\n\nๅœจไน ้ข˜ 9 ไธญๆˆ‘ไฝ ๆŽฅ่งฆไบ†ไธ€ไบ›ๆ–ฐไธœ่ฅฟใ€‚ๆˆ‘่ฎฉไฝ ็œ‹ๅˆฐไธค็ง่ฎฉๅญ—็ฌฆไธฒๆ‰ฉๅฑ•ๅˆฐๅคš่กŒ็š„\\\nๆ–นๆณ•ใ€‚็ฌฌไธ€็งๆ–นๆณ•ๆ˜ฏๅœจๆœˆไปฝไน‹้—ด็”จ ``\\n`` (back-slash ``n`` )้š”ๅผ€ใ€‚่ฟ™ไธคไธชๅญ—็ฌฆ็š„ไฝœ็”จๆ˜ฏ\\\nๅœจ่ฏฅไฝ็ฝฎไธŠๆ”พๅ…ฅไธ€ไธชโ€œๆ–ฐ่กŒ(new line)โ€ๅญ—็ฌฆใ€‚\n\nไฝฟ็”จๅๆ–œๆ  ``\\`` (back-slash) ๅฏไปฅๅฐ†้šพๆ‰“ๅฐๅ‡บๆฅ็š„ๅญ—็ฌฆๆ”พๅˆฐๅญ—็ฌฆไธฒใ€‚้’ˆๅฏนไธๅŒ็š„็ฌฆๅท\\\nๆœ‰ๅพˆๅคš่ฟ™ๆ ท็š„ๆ‰€่ฐ“โ€œ่ฝฌไน‰ๅบๅˆ—(escape sequences)โ€๏ผŒไฝ†ๆœ‰ไธ€ไธช็‰นๆฎŠ็š„่ฝฌไน‰ๅบๅˆ—๏ผŒๅฐฑๆ˜ฏ ``ๅŒๅๆ–œๆ (double back-slash)`` \n``\\\\`` ใ€‚่ฟ™ไธคไธชๅญ—็ฌฆ็ป„ๅˆไผšๆ‰“ๅฐๅ‡บไธ€ไธชๅๆ–œๆ ๆฅใ€‚ๆŽฅไธ‹ๆฅๆˆ‘ไปฌๅšๅ‡ ไธช็ปƒไน ๏ผŒ็„ถๅŽไฝ ๅฐฑ็Ÿฅ้“่ฟ™ไบ›่ฝฌไน‰ๅบๅˆ—็š„ๆ„ไน‰ไบ†ใ€‚ \n\nๅฆๅค–ไธ€็ง้‡่ฆ็š„่ฝฌไน‰ๅบๅˆ—ๆ˜ฏ็”จๆฅๅฐ†ๅ•ๅผ•ๅท ``'`` ๅ’ŒๅŒๅผ•ๅท ``\"`` ่ฝฌไน‰ใ€‚ๆƒณ่ฑกไฝ ๆœ‰ไธ€ไธช็”จ\\\nๅŒๅผ•ๅทๅผ•็”จ่ตทๆฅ็š„ๅญ—็ฌฆไธฒ๏ผŒไฝ ๆƒณ่ฆๅœจๅญ—็ฌฆไธฒ็š„ๅ†…ๅฎน้‡Œๅ†ๆทปๅŠ ไธ€็ป„ๅŒๅผ•ๅท่ฟ›ๅŽป๏ผŒๆฏ”ๅฆ‚ไฝ ๆƒณ่ฏด\\\n``\"I \"understand\" joe.\"``\\๏ผŒPython ๅฐฑไผš่ฎคไธบ ``\"understand\"`` ๅ‰ๅŽ็š„ไธคไธชๅผ•ๅทๆ˜ฏๅญ—็ฌฆไธฒ\\\n็š„่พน็•Œ๏ผŒไปŽ่€ŒๆŠŠๅญ—็ฌฆไธฒๅผ„้”™ใ€‚ไฝ ้œ€่ฆไธ€็งๆ–นๆณ•ๅ‘Š่ฏ‰ python ๅญ—็ฌฆไธฒ้‡Œ่พน็š„ๅŒๅผ•ๅทไธๆ˜ฏ็œŸๆญฃ\\ \n็š„ๅŒๅผ•ๅทใ€‚\n\n่ฆ่งฃๅ†ณ่ฟ™ไธช้—ฎ้ข˜๏ผŒไฝ ้œ€่ฆๅฐ†ๅŒๅผ•ๅทๅ’Œๅ•ๅผ•ๅท่ฝฌไน‰๏ผŒ่ฎฉ Python 
ๅฐ†ๅผ•ๅทไนŸๅŒ…ๅซๅˆฐๅญ—็ฌฆไธฒ้‡Œ่พนๅŽปใ€‚่ฟ™้‡Œ\\\nๆœ‰ไธ€ไธชไพ‹ๅญ๏ผš\n\n.. code-block:: python\n\n \"I am 6'2\\\" tall.\" # ๅฐ†ๅญ—็ฌฆไธฒไธญ็š„ๅŒๅผ•ๅท่ฝฌไน‰\n 'I am 6\\'2\" tall.' # ๅฐ†ๅญ—็ฌฆไธฒ็ง็š„ๅ•ๅผ•ๅท่ฝฌไน‰\n\n็ฌฌไบŒ็งๆ–นๆณ•ๆ˜ฏไฝฟ็”จโ€œไธ‰ๅผ•ๅท(triple-quotes)โ€๏ผŒไนŸๅฐฑๆ˜ฏ ``\"\"\"``\\๏ผŒไฝ ๅฏไปฅๅœจไธ€็ป„ไธ‰ๅผ•ๅทไน‹้—ดๆ”พๅ…ฅ\\\nไปปๆ„ๅคš่กŒ็š„ๆ–‡ๅญ—ใ€‚ๆŽฅไธ‹ๆฅไฝ ๅฐ†็œ‹ๅˆฐ็”จๆณ•ใ€‚\n\n\n\n.. literalinclude:: ex/ex10.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๆณจๆ„ไฝ ๆ‰“ๅฐๅ‡บๆฅ็š„ๅˆถ่กจ็ฌฆ๏ผŒ่ฟ™่Š‚็ปƒไน ไธญ็š„ๆ–‡ๅญ—้—ด้š”ๅฏนไบŽ็ญ”ๆกˆ็š„ๆญฃ็กฎๆ€งๆ˜ฏๅพˆ้‡่ฆ็š„ใ€‚\n\n.. literalinclude:: ex/ex10.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ไธŠ็ฝ‘ๆœ็ดขไธ€ไธ‹่ฟ˜ๆœ‰ๅ“ชไบ›ๅฏ็”จ็š„่ฝฌไน‰ๅญ—็ฌฆใ€‚\n2. ไฝฟ็”จ ``'''`` (ไธ‰ไธชๅ•ๅผ•ๅท)ๅ–ไปฃไธ‰ไธชๅŒๅผ•ๅท๏ผŒ็œ‹็œ‹ๆ•ˆๆžœๆ˜ฏไธๆ˜ฏไธ€ๆ ท็š„๏ผŸ\n3. ๅฐ†่ฝฌไน‰ๅบๅˆ—ๅ’Œๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒๆ”พๅˆฐไธ€่ตท๏ผŒๅˆ›ๅปบไธ€็งๆ›ดๅคๆ‚็š„ๆ ผๅผใ€‚\n4. ่ฎฐๅพ— ``%r`` ๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒๅ—๏ผŸไฝฟ็”จ ``%r`` ๆญ้…ๅ•ๅผ•ๅทๅ’ŒๅŒๅผ•ๅท่ฝฌไน‰ๅญ—็ฌฆๆ‰“ๅฐไธ€ไบ›ๅญ—็ฌฆไธฒๅ‡บๆฅใ€‚\n ๅฐ† %r ๅ’Œ %s ๆฏ”่พƒไธ€ไธ‹ใ€‚ \n ๆณจๆ„ๅˆฐไบ†ๅ—๏ผŸ%r ๆ‰“ๅฐๅ‡บๆฅ็š„ๆ˜ฏไฝ ๅ†™ๅœจ่„šๆœฌ้‡Œ็š„ๅ†…ๅฎน๏ผŒ่€Œ %s ๆ‰“ๅฐ็š„ๆ˜ฏไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„ๅ†…ๅฎนใ€‚\n\n" }, { "alpha_fraction": 0.6124197244644165, "alphanum_fraction": 0.6231263279914856, "avg_line_length": 21.14285659790039, "blob_id": "0f901ff3bdaeb4c79285323a3a3b057e2e8193f2", "content_id": "e1aab4442a60a72ba6fa3da075710e8235753988", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 467, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/ex8.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 8: Printing, Printing\n******************************\n\n.. literalinclude:: ex/ex8.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex8.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. 
Do your checks of your work, write down your mistakes, try not to make them \n on the next exercise.\n2. Notice that the last line of output uses both single and double \n quotes for individual pieces. Why do you think that is? \n\n" }, { "alpha_fraction": 0.6959821581840515, "alphanum_fraction": 0.7017857432365417, "avg_line_length": 39.727272033691406, "blob_id": "f7fe3c3874e9eb4592ae0147ff3a62b3d5e46060", "content_id": "7b1408364ba9fd3f8e1a6a9b1f1f32fd151a6fea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2240, "license_type": "no_license", "max_line_length": 79, "num_lines": 55, "path": "/ex17.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 17: More Files\n***********************\n\nNow let's do a few more things with files. We're going to actually write a \nPython script to copy one file to another. It'll be very short but will give\nyou some ideas about other things you can do with files.\n\n.. literalinclude:: ex/ex17.py\n :linenos:\n\nYou should immediately notice that we ``import`` another handy command\nnamed ``exists``. This returns ``True`` if a file exists, based on its\nname in a string as an argument. It returns ``False`` if not. We'll be using\nthis function in the second half of this book to do lots of things, but\nright now you should see how you can import it.\n\nUsing ``import`` is a way to get tons of free code other better (well, usually)\nprogrammers have written so you do not have to write it.\n\n\nWhat You Should See\n===================\n\nJust like your other scripts, run this one with two arguments, the file to copy\nfrom and the file to copy it to. If we use your ``test.txt`` file from before\nwe get this:\n\n.. literalinclude:: ex/ex17.txt\n :language: console\n\nIt should work with any file. Try a bunch more and see what happens. Just\nbe careful you do not blast an important file.\n\n.. 
warning::\n\n Did you see that trick I did with ``cat``? It only works on Linux or OSX,\n on Windows use ``type`` to do the same thing.\n\n\nExtra Credit\n============\n\n1. Go read up on Python's ``import`` statement, and start ``python`` to try\n it out. Try importing some things and see if you can get it right. It's \n alright if you do not.\n2. This script is *really* annoying. There's no need to ask you before \n doing the copy, and it prints too much out to the screen. Try to make it\n more friendly to use by removing features.\n3. See how short you can make the script. I could make this 1 line long.\n4. Notice at the end of the WYSS I used something called `cat`? It's an old\n command that \"con*cat*enates\" files together, but mostly it's just an\n easy way to print a file to the screen. Type ``man cat`` to read about it.\n5. Windows people, find the alternative to ``cat`` that Linux/OSX\n people have. Do not worry about ``man`` since there is nothing like that.\n6. Find out why you had to do ``output.close()`` in the code.\n" }, { "alpha_fraction": 0.5615763664245605, "alphanum_fraction": 0.5802955627441406, "avg_line_length": 19.108911514282227, "blob_id": "7e83c5ee42dfe516989656d0fc3294ec37800778", "content_id": "a381f8f0a5e27876dd8bf56526be56dd8ea2cff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3320, "license_type": "no_license", "max_line_length": 76, "num_lines": 101, "path": "/cn/ex40.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 40: ๅญ—ๅ…ธ, ๅฏ็ˆฑ็š„ๅญ—ๅ…ธ\n**********************************************\n\nๆŽฅไธ‹ๆฅๆˆ‘่ฆๆ•™ไฝ ๅฆๅค–ไธ€็ง่ฎฉไฝ ไผค่„‘็ญ‹็š„ๅฎนๅ™จๅž‹ๆ•ฐๆฎ็ป“ๆž„๏ผŒๅ› ไธบไธ€ๆ—ฆไฝ ๅญฆไผš่ฟ™็งๅฎนๅ™จ๏ผŒ\\\nไฝ ๅฐ†ๆ‹ฅๆœ‰่ถ…้…ท็š„่ƒฝๅŠ›ใ€‚่ฟ™ๆ˜ฏๆœ€ๆœ‰็”จ็š„ๅฎนๅ™จ๏ผšๅญ—ๅ…ธ(dictionary)ใ€‚\n\nPython ๅฐ†่ฟ™็งๆ•ฐๆฎ็ฑปๅž‹ๅซๅš \"dict\"๏ผŒๆœ‰็š„่ฏญ่จ€้‡Œๅฎƒ็š„ๅ็งฐๆ˜ฏ 
\"hash\"ใ€‚่ฟ™ไธค็งๅๅญ—\\\nๆˆ‘้ƒฝไผš็”จๅˆฐ๏ผŒไธ่ฟ‡่ฟ™ๅนถไธ้‡่ฆ๏ผŒ้‡่ฆ็š„ๆ˜ฏๅฎƒไปฌๅ’Œๅˆ—่กจ็š„ๅŒบๅˆซใ€‚ไฝ ็œ‹๏ผŒ้’ˆๅฏนๅˆ—่กจไฝ ๅฏไปฅๅš\\\n่ฟ™ๆ ท็š„ไบ‹ๆƒ…๏ผš\n\n\n.. code-block:: pycon\n\n >>> things = ['a', 'b', 'c', 'd']\n >>> print things[1]\n b\n >>> things[1] = 'z'\n >>> print things[1]\n z\n >>> print things\n ['a', 'z', 'c', 'd']\n >>> \n\nไฝ ๅฏไปฅไฝฟ็”จๆ•ฐๅญ—ไฝœไธบๅˆ—่กจ็š„็ดขๅผ•๏ผŒไนŸๅฐฑๆ˜ฏไฝ ๅฏไปฅ้€š่ฟ‡ๆ•ฐๅญ—ๆ‰พๅˆฐๅˆ—่กจไธญ็š„ๅ…ƒ็ด ใ€‚่€Œ ``dict``\nๆ‰€ไฝœ็š„๏ผŒๆ˜ฏ่ฎฉไฝ ๅฏไปฅ้€š่ฟ‡ไปปไฝ•ไธœ่ฅฟๆ‰พๅˆฐๅ…ƒ็ด ๏ผŒไธๅชๆ˜ฏๆ•ฐๅญ—ใ€‚ๆ˜ฏ็š„๏ผŒๅญ—ๅ…ธๅฏไปฅๅฐ†ไธ€ไธช็‰ฉไปถ\\\nๅ’Œๅฆๅค–ไธ€ไธชไธœ่ฅฟๅ…ณ่”๏ผŒไธ็ฎกๅฎƒไปฌ็š„็ฑปๅž‹ๆ˜ฏไป€ไนˆ๏ผŒๆˆ‘ไปฌๆฅ็œ‹็œ‹๏ผš\n\n.. code-block:: pycon\n\n >>> stuff = {'name': 'Zed', 'age': 36, 'height': 6*12+2}\n >>> print stuff['name']\n Zed\n >>> print stuff['age']\n 36\n >>> print stuff['height']\n 74\n >>> stuff['city'] = \"San Francisco\"\n >>> print stuff['city']\n San Francisco\n >>> \n\nไฝ ๅฐ†็œ‹ๅˆฐ้™คไบ†้€š่ฟ‡ๆ•ฐๅญ—ไปฅๅค–๏ผŒๆˆ‘ไปฌ่ฟ˜ๅฏไปฅ็”จๅญ—็ฌฆไธฒๆฅไปŽๅญ—ๅ…ธไธญ่Žทๅ– ``stuff`` ๏ผŒๆˆ‘ไปฌ\\\n่ฟ˜ๅฏไปฅ็”จๅญ—็ฌฆไธฒๆฅๅพ€ๅญ—ๅ…ธไธญๆทปๅŠ ๅ…ƒ็ด ใ€‚ๅฝ“็„ถๅฎƒๆ”ฏๆŒ็š„ไธๅชๆœ‰ๅญ—็ฌฆไธฒ๏ผŒๆˆ‘ไปฌ่ฟ˜ๅฏไปฅๅš\\\n่ฟ™ๆ ท็š„ไบ‹ๆƒ…๏ผš\n\n.. code-block:: pycon\n\n >>> stuff[1] = \"Wow\"\n >>> stuff[2] = \"Neato\"\n >>> print stuff[1]\n Wow\n >>> print stuff[2]\n Neato\n >>> print stuff\n {'city': 'San Francisco', 2: 'Neato', \n 'name': 'Zed', 1: 'Wow', 'age': 36, \n 'height': 74}\n >>>\n\nๅœจ่ฟ™้‡Œๆˆ‘ไฝฟ็”จไบ†ไธคไธชๆ•ฐๅญ—ใ€‚ๅ…ถๅฎžๆˆ‘ๅฏไปฅไฝฟ็”จไปปไฝ•ไธœ่ฅฟ๏ผŒไธ่ฟ‡่ฟ™ไนˆ่ฏดๅนถไธๅ‡†็กฎ๏ผŒไธ่ฟ‡ไฝ ๅ…ˆ\\\n่ฟ™ไนˆ็†่งฃๅฐฑ่กŒไบ†ใ€‚\n\nๅฝ“็„ถไบ†๏ผŒไธ€ไธชๅช่ƒฝๆ”พไธœ่ฅฟ่ฟ›ๅŽป็š„ๅญ—ๅ…ธๆ˜ฏๆฒกๅ•ฅๆ„ๆ€็š„๏ผŒๆ‰€ไปฅๆˆ‘ไปฌ่ฟ˜่ฆๆœ‰ๅˆ ้™ค็‰ฉไปถ็š„ๆ–นๆณ•๏ผŒ\\\nไนŸๅฐฑๆ˜ฏไฝฟ็”จ ``del`` ่ฟ™ไธชๅ…ณ้”ฎๅญ—๏ผš\n\n.. 
code-block:: pycon\n \n >>> del stuff['city']\n >>> del stuff[1]\n >>> del stuff[2]\n >>> stuff\n {'name': 'Zed', 'age': 36, 'height': 74}\n >>>\n\nๆŽฅไธ‹ๆฅๆˆ‘ไปฌ่ฆๅšไธ€ไธช็ปƒไน ๏ผŒไฝ ๅฟ…้กป้žๅธธไป”็ป†๏ผŒๆˆ‘่ฆๆฑ‚ไฝ ๅฐ†่ฟ™ไธช็ปƒไน ๅ†™ไธ‹ๆฅ๏ผŒ็„ถๅŽ่ฏ•็€ๅผ„ๆ‡‚\\\nๅฎƒๅšไบ†ไบ›ไป€ไนˆใ€‚่ฟ™ไธช็ปƒไน ๅพˆๆœ‰่ถฃ๏ผŒๅšๅฎŒไปฅๅŽไฝ ๅฏ่ƒฝไผšๆœ‰่ฑ็„ถๅผ€ๆœ—็š„ๆ„Ÿ่ง‰ใ€‚\n\n\n.. literalinclude:: ex/ex40.py\n :linenos:\n\n\n.. warning:: ๆณจๆ„ๅˆฐๆˆ‘็”จไบ† ``themap`` ่€Œไธๆ˜ฏ ``map`` ไบ†ๅง๏ผŸ่ฟ™ๆ˜ฏๅ› ไธบ Python ๅทฒ็ปๆœ‰\\\n ไธ€ไธชๅ‡ฝๆ•ฐ็งฐไฝœ map ไบ†๏ผŒๆ‰€ไปฅๅฆ‚ๆžœไฝ ็”จ map ๅšๅ˜้‡ๅ๏ผŒไฝ ๅŽ้ขๅฏ่ƒฝไผš็ขฐๅˆฐ้—ฎ้ข˜ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex40.txt\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅœจ Python ๆ–‡ๆกฃไธญๆ‰พๅˆฐ dictionary (ๅˆ่ขซ็งฐไฝœ dicts, dict)็š„็›ธๅ…ณ็š„ๅ†…ๅฎน๏ผŒๅญฆ็€ๅฏน dict ๅšๆ›ดๅคš็š„ๆ“ไฝœใ€‚\n2. ๆ‰พๅ‡บไธ€ไบ› dict ๆ— ๆณ•ๅšๅˆฐ็š„ไบ‹ๆƒ…ใ€‚ไพ‹ๅฆ‚ๆฏ”่พƒ้‡่ฆ็š„ไธ€ไธชๅฐฑๆ˜ฏ dict ็š„ๅ†…ๅฎนๆ˜ฏๆ— ๅบ็š„๏ผŒไฝ ๅฏไปฅๆฃ€ๆŸฅไธ€ไธ‹็œ‹็œ‹\\\n ๆ˜ฏๅฆ็œŸๆ˜ฏ่ฟ™ๆ ทใ€‚\n3. ่ฏ•็€ๆŠŠ ``for-loop`` ๆ‰ง่กŒๅˆฐ dict ไธŠ้ข๏ผŒ็„ถๅŽ่ฏ•็€ๅœจ for-loop ไธญไฝฟ็”จ dict ็š„ ``items()`` ๅ‡ฝๆ•ฐ๏ผŒ็œ‹็œ‹\\\n ไผšๆœ‰ไป€ไนˆๆ ท็š„็ป“ๆžœใ€‚" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.5325203537940979, "avg_line_length": 12.61111068725586, "blob_id": "983e410668a554234ab4582fbd6411cab56e6d42", "content_id": "7273a026a06e8d0185b57281574fe1044805d5e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 414, "license_type": "no_license", "max_line_length": 37, "num_lines": 18, "path": "/cn/ex8.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 8: ๆ‰“ๅฐ๏ผŒๆ‰“ๅฐ\n******************************\n\n.. literalinclude:: ex/ex8.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex8.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. 
่‡ชๅทฑๆฃ€ๆŸฅ็ป“ๆžœ๏ผŒ่ฎฐๅฝ•ไฝ ็Šฏ่ฟ‡็š„้”™่ฏฏ๏ผŒๅนถไธ”ๅœจไธ‹ไธช็ปƒไน ไธญๅฐฝ้‡ไธ็ŠฏๅŒๆ ท็š„้”™่ฏฏใ€‚\n2. ๆณจๆ„ๆœ€ๅŽไธ€่กŒ็จ‹ๅบไธญๆ—ขๆœ‰ๅ•ๅผ•ๅทๅˆๆœ‰ๅŒๅผ•ๅท๏ผŒไฝ ่ง‰ๅพ—ๅฎƒๆ˜ฏๅฆ‚ไฝ•ๅทฅไฝœ็š„๏ผŸ\n\n" }, { "alpha_fraction": 0.7481127977371216, "alphanum_fraction": 0.7484208941459656, "avg_line_length": 54.46154022216797, "blob_id": "c747b98e2080690fe6831d9da8add00c2ef80a08", "content_id": "fd244162647c531f0a372c01b97b61434af95cb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6491, "license_type": "no_license", "max_line_length": 125, "num_lines": 117, "path": "/ex44.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 44: Evaluating Your Game\n*********************************\n\nIn this exercise you will evaluate the game you just made. Maybe you got\npart-way through it and you got stuck. Maybe you got it working but just barely.\nEither way, we're going to go through a bunch of things you should know now and \nmake sure you covered them in your game. We're going to study how to properly\nformat a class, common conventions in using classes, and a lot of \"textbook\" knowledge.\n\nWhy would I have you try to do it yourself and then show you how to do it right?\nFrom now on in the book I'm going to try to make you self-sufficient. I've been\nholding your hand mostly this whole time, and I can't do that for much longer.\nI'm now instead going to give you things to do, have you do them on your own,\nand then give you ways to improve what you did.\n\nYou will struggle at first and probably be very frustrated but stick\nwith it and eventually you will build a mind for solving problems. 
You will start\nto find creative solutions to problems rather than just copy solutions out of\ntextbooks.\n\nFunction Style\n==============\n\nAll the other rules I've taught you about how to make a function nice apply here, but\nadd these things:\n\n* For various reasons, programmers call functions that are part of classes ``methods``.\n It's mostly marketing but just be warned that every time you say \"function\" they'll\n annoyingly correct you and say \"method\". If they get too annoying, just ask them to\n demonstrate the mathematical basis that determines how a \"method\" is different from\n a \"function\" and they'll shut up.\n* When you work with classes much of your time is spent talking about making the class\n \"do things\". Instead of naming your functions after what the function does, instead\n name it as if it's a command you are giving to the class. Same as ``pop`` is saying\n \"Hey list, ``pop`` this off.\" It isn't called ``remove_from_end_of_list`` because\n even though that's what it does, that's not a *command* to a list.\n* Keep your functions small and simple. For some reason when people start learning about\n classes they forget this.\n\n\nClass Style\n===========\n\n* Your class should use \"camel case\" like ``SuperGoldFactory`` rather than ``super_gold_factory``.\n* Try not to do too much in your ``__init__`` functions. It makes them harder to use.\n* Your other functions should use \"underscore format\" so write ``my_awesome_hair`` and not\n ``myawesomehair`` or ``MyAwesomeHair``.\n* Be consistent in how you organize your function arguments. If your class has to deal\n with users, dogs, and cats, keep that order throughout unless it really doesn't make\n sense. If you have one function takes ``(dog, cat, user)`` and the other takes ``(user, cat, dog)``, it'll be hard to use.\n* Try not to use variables that come from the module or globals. They should be fairly\n self-contained.\n* A foolish consistency is the hobgoblin of little minds. 
Consistency is good, but foolishly\n following some idiotic mantra because everyone else does is bad style. Think for yourself.\n* Always, *always* have ``class Name(object)`` format or else you will be in big trouble.\n\n\nCode Style\n==========\n\n* Give your code vertical space so people can read it. You will find some very\n bad programmers who are able to write reasonable code, but who do not add\n *any* spaces. This is bad style in any language because the human eye and\n brain use space and vertical alignment to scan and separate visual elements.\n Not having space is the same as giving your code an awesome camouflage paint job.\n* If you can't read it out loud, it's probably hard to read. If you are having a\n problem making something easy to use, try reading it out loud. Not only\n does this force you to slow down and really read it, but it also helps you find\n difficult passages and things to change for readability.\n* Try to do what other people are doing in Python until you find your own style.\n* Once you find your own style, do not be a jerk about it. Working with other people's\n code is part of being a programmer, and other people have really bad taste.\n Trust me, you will probably have really bad taste too and not even realize it.\n* If you find someone who writes code in a style you like, try writing something\n that mimics their style.\n\nGood Comments\n=============\n\n* There are programmers who will tell you that your code should be readable\n enough that you do not need comments. They'll then tell you in their most\n official sounding voice that, \"Ergo you should never write comments.\" Those\n programmers are either consultants who get paid more if other people can't\n use their code, or incompetents who tend to never work with other people.\n Ignore them and write comments.\n* When you write comments, describe *why* you are doing what\n you are doing. 
The code already says how, but why you did things the way\n you did is more important.\n* When you write doc comments for your functions , make the\n comments documentation for someone who will have to use your code. You do not\n have to go crazy, but a nice little sentence about what someone does with\n that function helps a lot.\n* Finally, while comments are good, too many are bad, and you have to \n maintain them. Keep your comments relatively short and to the point,\n and if you change a function, review the comment to make sure it's still\n correct.\n\nEvaluate Your Game\n==================\n\nI want you now to pretend you are me. Adopt a very stern look, print out your\ncode, and take a red pen and mark every mistake you find. Anything from\nthis exercise and from other things you have known. Once you are done marking\nyour code up, I want you to fix everything you came up with. Then repeat this\na couple of times, looking for anything that could be better. Use all the \ntricks I've given you to break your code down into the smallest tiniest\nlittle analysis you can.\n\nThe purpose of this exercise is to train your attention to detail on classes.\nOnce you are done with this bit of code, find someone else's code and do the \nsame thing. Go through a printed copy of some part of it and point out all\nthe mistakes and style errors you find. Then fix it and see if your fixes\ncan be done without breaking their program.\n\nI want you to do nothing but evaluate and fix code for the week. Your own code\nand other people's. 
It'll be pretty hard work, but when you are done your brain\nwill be wired tight like a boxer's hands.\n\n\n" }, { "alpha_fraction": 0.6935291886329651, "alphanum_fraction": 0.6994839310646057, "avg_line_length": 40.26229476928711, "blob_id": "f0d3c90f15b1ff62087db537631d1eaa3fe3249b", "content_id": "6a161cd1c264d2d30eced72ed8b66724cfaf0665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2519, "license_type": "no_license", "max_line_length": 101, "num_lines": 61, "path": "/ex10.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 10: What Was That?\n***************************\n\nIn Exercise 9 I threw you some new stuff, just to keep you on your toes. I\nshowed you two ways to make a string that goes across multiple lines. In the\nfirst way, I put the characters ``\\n`` (back-slash ``n``) between the names of\nthe months. What these two characters do is put a ``new line character`` into\nthe string at that point.\n\nThis use of the ``\\`` (back-slash) character is a way we can put difficult-to-type\ncharacters into a string. There are plenty of these \"escape sequences\" available\nfor different characters you might want to put in, but there's a special one,\nthe ``double back-slash`` which is just two of them ``\\\\``. These two characters\nwill print just one back-slash. We'll try a few of these sequences so you can\nsee what I mean.\n\nAnother important escape sequence is to escape a single-quote ``'`` or\ndouble-quote ``\"``. Imagine you have a string that uses double-quotes\nand you want to put a double-quote in for the output. If you do this ``\"I \"understand\" joe.\"``\nthen Python will get confused since it will think the ``\"`` around ``\"understand\"``\nactually *ends* the string. 
You need a way to tell Python that the ``\"`` inside\nthe string isn't a *real* double-quote.\n\nTo solve this problem you *escape* double-quotes and single-quotes so Python\nknows to include in the string. Here's an example:\n\n.. code-block:: python\n\n \"I am 6'2\\\" tall.\" # escape double-quote inside string\n 'I am 6\\'2\" tall.' # escape single-quote inside string\n\nThe second way is by using triple-quotes, which is just ``\"\"\"`` and works\nlike a string, but you also can put as many lines of text you as want\nuntil you type ``\"\"\"`` again. We'll also play with these.\n\n\n\n.. literalinclude:: ex/ex10.py\n :linenos:\n\n\nWhat You Should See\n===================\n\nLook for the tab characters that you made. In this exercise the spacing is\nimportant to get right.\n\n.. literalinclude:: ex/ex10.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Search online to see what other escape sequences are available.\n2. Use ``'''`` (triple-single-quote) instead. Can you see why you might use that instead of ``\"\"\"``?\n3. Combine escape sequences and format strings to create a more complex format.\n4. Remember the ``%r`` format? Combine ``%r`` with double-quote and \n single-quote escapes and print them out. Compare %r with %s. 
\n Notice how %r prints it the way you'd write it in your file, but \n %s prints it the way you'd like to see it?\n\n\n" }, { "alpha_fraction": 0.7452948689460754, "alphanum_fraction": 0.7503136992454529, "avg_line_length": 42.436363220214844, "blob_id": "d094d4bdaca9e8a3f314b6f04400806d8a6d2348", "content_id": "5b006a4193f198fe01039460424cb7dc9b6097dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2391, "license_type": "no_license", "max_line_length": 83, "num_lines": 55, "path": "/ex23.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 23: Read Some Code\n***************************\n\nYou should have spent last week getting your list of symbols straight\nand locked in your mind. Now you get to apply this to another\nweek reading code on the internet. This exercise will be daunting at first.\nI'm going to throw you in the deep end for a few days and have you just try\nyour best to read and understand some source code from real projects. The\ngoal isn't to get you to understand code, but to teach you the following\nthree skills:\n\n\n1. Finding Python source code for things you need.\n2. Reading through the code and looking for files.\n3. Trying to understand code you find.\n\nAt your level you really do not have the skills to evaluate the things\nyou find, but you can benefit from getting exposure and seeing how\nthings look.\n\nWhen you do this exercise, think of yourself as an anthropologist, trucking\nthrough a new land with just barely enough of the local language to get around\nand survive. Except, of course, that you will actually get out alive because\nthe internet isn't a jungle. Anyway.\n\nHere's what you do:\n\n1. Go to bitbucket.org with your favorite web browser and search for \"python\".\n2. Avoid any project with \"Python 3\" mentioned. That'll only confuse you.\n3. Pick a random project and click on it.\n4. 
Click on the ``Source`` tab and browse through the list of files and\n directories until you find a .py file (but not setup.py, that's useless).\n5. Start at the top and read through it, taking notes on what you think it\n does.\n6. If any symbols or strange words seem to interest you, write them down to\n research later.\n\nThat's it. Your job is to use what you know so far and see if you can read the\ncode and get a grasp of what it does. Try skimming the code first, and then\nread it in detail. Maybe also try taking very difficult parts and reading each\nsymbol you know outloud.\n\nNow try several three other sites:\n\n* github.com\n* launchpad.net\n* koders.com\n\nOn each of these sites you may find weird files ending in ``.c`` so stick to\n``.py`` files like the ones you have written in this book.\n\nA final fun thing to do is use the above four sources of Python code and \ntype in topics you are interested in instead of \"python\". Search for \"journalism\",\n\"cooking\", \"physics\", or anything you are curious about. 
Chances are there's\nsome code out there you could use right away.\n\n\n" }, { "alpha_fraction": 0.6461949348449707, "alphanum_fraction": 0.6662216186523438, "avg_line_length": 16, "blob_id": "3e4959a88a19af26986bcb89548b9d7b0ac4995e", "content_id": "57bc075f26c4f1e7c09e45e6d17dc8367a3feb21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1613, "license_type": "no_license", "max_line_length": 60, "num_lines": 44, "path": "/cn/ex11.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 11: ๆ้—ฎ\n*****************************\n\nๆˆ‘ๅทฒ็ปๅ‡บ่ฟ‡ๅพˆๅคšๆ‰“ๅฐ็›ธๅ…ณ็š„็ปƒไน ๏ผŒ่ฎฉไฝ ไน ๆƒฏๅ†™็ฎ€ๅ•็š„ไธœ่ฅฟ๏ผŒไฝ†็ฎ€ๅ•็š„ไธœ่ฅฟ้ƒฝๆœ‰็‚นๆ— ่Š๏ผŒ\\\n็Žฐๅœจ่ฏฅ่ทŸไธŠ่„šๆญฅไบ†ใ€‚ๆˆ‘ไปฌ็Žฐๅœจ่ฆๅš็š„ๆ˜ฏๆŠŠๆ•ฐๆฎ่ฏปๅˆฐไฝ ็š„็จ‹ๅบ้‡Œ่พนๅŽปใ€‚่ฟ™ๅฏ่ƒฝๅฏนไฝ ๆœ‰็‚น้šพๅบฆ๏ผŒ\\\nไฝ ๅฏ่ƒฝไธ€ไธ‹ๅญไธๆ˜Ž็™ฝ๏ผŒไธ่ฟ‡ไฝ ้œ€่ฆ็›ธไฟกๆˆ‘๏ผŒๆ— ่ฎบๅฆ‚ไฝ•ๆŠŠไน ้ข˜ๅšไบ†ๅ†่ฏดใ€‚ๅช่ฆๅšๅ‡ ไธช็ปƒไน \\\nไฝ ๅฐฑๆ˜Ž็™ฝไบ†ใ€‚\n\nไธ€่ˆฌ่ฝฏไปถๅš็š„ไบ‹ๆƒ…ไธป่ฆๅฐฑๆ˜ฏไธ‹้ขๅ‡ ๆก๏ผš\n\n1. ๆŽฅๅ—ไบบ็š„่พ“ๅ…ฅใ€‚\n2. ๆ”นๅ˜่พ“ๅ…ฅใ€‚\n3. ๆ‰“ๅฐๅ‡บๆ”นๅ˜ไบ†็š„่พ“ๅ…ฅใ€‚\n\nๅˆฐ็›ฎๅ‰ไธบๆญขไฝ ๅชๅšไบ†ๆ‰“ๅฐ๏ผŒไฝ†่ฟ˜ไธไผšๆŽฅๅ—ๆˆ–่€…ไฟฎๆ”นไบบ็š„่พ“ๅ…ฅใ€‚ไฝ ไนŸ่ฎธ่ฟ˜ไธ็Ÿฅ้“โ€œ่พ“ๅ…ฅ(input)โ€\\\nๆ˜ฏไป€ไนˆๆ„ๆ€ใ€‚ๆ‰€ไปฅ้—ฒ่ฏๅฐ‘่ฏด๏ผŒๆˆ‘ไปฌ่ฟ˜ๆ˜ฏๅผ€ๅง‹ๅš็‚น็ปƒไน ็œ‹ไฝ ่ƒฝไธ่ƒฝๆ˜Ž็™ฝใ€‚ไธ‹ไธ€ไธชไน ้ข˜้‡Œ่พนๆˆ‘ไปฌ\\\nไผš็ป™ไฝ ๆ›ดๅคš็š„่งฃ้‡Šใ€‚\n\n\n.. literalinclude:: ex/ex11.py\n :linenos:\n\n.. note::\n\n ๆณจๆ„ๅˆฐๆˆ‘ๅœจๆฏ่กŒ ``print`` ๅŽ้ขๅŠ ไบ†ไธช้€—ๅท(comma) ``,`` ไบ†ๅง๏ผŸ่ฟ™ๆ ท็š„่ฏ ``print``\n ๅฐฑไธไผš่พ“ๅ‡บๆ–ฐ่กŒ็ฌฆ่€Œ็ป“ๆŸ่ฟ™ไธ€่กŒ่ท‘ๅˆฐไธ‹ไธ€่กŒๅŽปไบ†ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n\n.. literalinclude:: ex/ex11.txt\n\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ไธŠ็ฝ‘ๆŸฅไธ€ไธ‹ Python ็š„ ``raw_input`` ๅฎž็Žฐ็š„ๆ˜ฏไป€ไนˆๅŠŸ่ƒฝใ€‚\n2. ไฝ ่ƒฝๆ‰พๅˆฐๅฎƒ็š„ๅˆซ็š„็”จๆณ•ๅ—๏ผŸๆต‹่ฏ•ไธ€ไธ‹ไฝ ไธŠ็ฝ‘ๆœ็ดขๅˆฐ็š„ไพ‹ๅญใ€‚\n3. 
็”จ็ฑปไผผ็š„ๆ ผๅผๅ†ๅ†™ไธ€ๆฎต๏ผŒๆŠŠ้—ฎ้ข˜ๆ”นๆˆไฝ ่‡ชๅทฑ็š„้—ฎ้ข˜ใ€‚\n4. ๅ’Œ่ฝฌไน‰ๅบๅˆ—ๆœ‰ๅ…ณ็š„๏ผŒๆƒณๆƒณไธบไป€ไนˆๆœ€ๅŽไธ€่กŒ ``'6\\'2\"'`` ้‡Œ่พนๆœ‰ไธ€ไธช ``\\'`` ๅบๅˆ—ใ€‚ๅ•ๅผ•ๅท้œ€่ฆ\\\n ่ขซ่ฝฌไน‰๏ผŒไปŽ่€Œ้˜ฒๆญขๅฎƒ่ขซ่ฏ†ๅˆซไธบๅญ—็ฌฆไธฒ็š„็ป“ๅฐพใ€‚ๆœ‰ๆฒกๆœ‰ๆณจๆ„ๅˆฐ่ฟ™ไธ€็‚น๏ผŸ\n\n" }, { "alpha_fraction": 0.7094762921333313, "alphanum_fraction": 0.721657395362854, "avg_line_length": 48.4075813293457, "blob_id": "9f0c2fe8f7cd69d8336b3d2ab059a2b2e6a6b84a", "content_id": "7696593ec1ea13428732cb577d075d337f654d56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10426, "license_type": "no_license", "max_line_length": 185, "num_lines": 211, "path": "/ex50.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 50: Your First Website\n*******************************\n\nThese final three exercises will be very hard and you should take your time with them.\nIn this first one you'll build a simple web version of one of your games. Before you attempt this exercise you *must* have completed Exercise 46 successfully\nand have a working ``pip`` installed such that you can install packages and\nknow how to make a skeleton project directory. If you don't remember how\nto do this, go back to Exercise 46 and do it all over again.\n\n\nInstalling lpthw.web\n====================\n\nBefore creating your first web application, you'll first need to install the \"web framework\" called ``lpthw.web``. The term \"framework\" generally\nmeans \"some package that makes it easier for me to do something\". In the world\nof web applications, people create \"web frameworks\" to compensate for the difficult\nproblems they've encountered when making their own sites. They share these\ncommon solutions in the form of a package you can\ndownload to bootstrap your own projects.\n\nIn our case, we'll be using the ``lpthw.web`` framework, but there are many, many,\n*many* others you can choose from. 
For now, learn ``lpthw.web`` then branch out to another\none when you're ready (or just keep using ``lpthw.web`` since it's good enough).\n\nUsing ``pip`` install ``lpthw.web``:\n\n.. code-block:: console\n\n $ sudo pip install lpthw.web\n [sudo] password for zedshaw: \n Downloading/unpacking lpthw.web\n Running setup.py egg_info for package lpthw.web\n \n Installing collected packages: lpthw.web\n Running setup.py install for lpthw.web\n \n Successfully installed lpthw.web\n Cleaning up...\n\nThis will work on Linux and Mac OSX computers, but on Windows just drop the ``sudo`` part of\nthe pip install command and it should work. If not, go back to Exercise 46\nand make sure you can do it reliably.\n\n.. warning::\n\n Other Python programmers will warn you that ``lpthw.web`` is just a fork of\n another web framework called ``web.py``, and that ``web.py`` has too much\n \"magic\". If they say this, point out to them that Google App Engine\n originally used ``web.py`` and not a single Python programmer complained\n that it had too much magic, because they all worked at Google.\n If it's good enough for Google, then it's good enough for you to get\n started. Then, just get back to learning to code and ignore their goal of indoctrination over education.\n\n\n\nMake A Simple \"Hello World\" Project\n===================================\n\nNow you're going to make an initial very simple \"Hello World\" web application and\nproject directory using ``lpthw.web``. First, make your project directory:\n\n.. code-block:: console\n\n $ cd projects\n $ mkdir gothonweb\n $ cd gothonweb\n $ mkdir bin gothonweb tests docs templates\n $ touch gothonweb/__init__.py\n $ touch tests/__init__.py\n\nYou'll be taking the game from Exercise 42 and making it into a web application, so that's\nwhy you're calling it ``gothonweb``. Before you do that, we need to create the most basic\n``lpthw.web`` application possible. Put the following code into ``bin/app.py``:\n\n.. 
literalinclude:: ex/ex50.py\n :linenos:\n\nThen run the application like this:\n\n.. code-block:: console\n\n $ python bin/app.py\n http://0.0.0.0:8080/\n\nFinally, use your web browser and go to the URL ``http://localhost:8080/`` and you should see two\nthings. First, in your browser you'll see ``Hello, world!``. Second, you'll see your terminal\nwith new output like this:\n\n\n.. code-block:: console\n\n $ python bin/app.py\n http://0.0.0.0:8080/\n 127.0.0.1:59542 - - [13/Jun/2011 11:44:43] \"HTTP/1.1 GET /\" - 200 OK\n 127.0.0.1:59542 - - [13/Jun/2011 11:44:43] \"HTTP/1.1 GET /favicon.ico\" - 404 Not Found\n\nThose are log messages that ``lpthw.web`` prints out so you can see that the server is working, and\nwhat the browser is doing behind the scenes. The log messages help you debug and figure\nout when you have problems. For example, it's saying that your browser tried to get\n``/favicon.ico`` but that file didn't exist so it returned ``404 Not Found`` status\ncode.\n\nI haven't explained the way *any* of this web stuff works yet, because I want to get you setup\nand ready to roll so that I can explain it better in the next two exercises. To accomplish this, I'll have you break your lpthw.web application in various ways and then restructure it \nso that you know how it's setup.\n\nWhat's Going On?\n================\n\nHere's what's happening when your browser hits your application:\n\n1. Your browser makes a network connection to your own computer, which is called ``localhost``\n and is a standard way of saying \"whatever my own computer is called on the network\". It\n also uses port ``8080``.\n2. Once it connects, it makes an HTTP request to the ``bin/app.py`` application and asks for\n the ``/`` URL, which is commonly the first URL on any website.\n3. Inside ``bin/app.py`` you've got a list of URLs and what classes they match. The only one\n we have is the ``'/', 'index'`` mapping. 
This means that whenever someone goes to ``/``\n with a browser, ``lpthw.web`` will find the ``class index`` and load it to handle the\n request.\n4. Now that ``lpthw.web`` has found ``class index`` it calls the ``index.GET`` method on an\n instance of that class to actually handle the request. This function runs, and simply\n returns a string for what ``lpthw.web`` should send to the browser.\n5. Finally, ``lpthw.web`` has handled the request and sends this response to the browser which\n is what you are seeing.\n\nMake sure you really understand this. Draw up a diagram of how this information\nflows from your browser, to ``lpthw.web``, then to ``index.GET`` and back to your browser.\n\n\nFixing Errors\n=============\n\nFirst, delete line 11 where you assign the ``greeting`` variable, then hit refresh in your\nbrowser. You should see an error page now that gives you lots of information on how your\napplication just exploded. You know that the variable ``greeting`` is now missing, but\n``lpthw.web`` gives you this nice error page to track down exactly where. Do each\nof the following with this page:\n\n1. Look at each of the ``Local vars`` outputs (click on them) and see if you can follow what\n variables it's talking about and where they are.\n2. Look at the ``Request Information`` section and see if it matches anything you're already familiar with.\n This is information that your web browser is sending to your ``gothonweb`` application. You\n normally don't even know that it's sending this stuff, so now you get to see what it does.\n3. Try breaking this simple application in other ways and explore what happens. Don't forget to also\n look at the logs being printed into your terminal as ``lpthw.web`` will put other stack traces\n and information there too.\n\n\nCreate Basic Templates\n======================\n\nYou can break your lpthw.web application, but did you notice that \"Hello World\" isn't a very\ngood HTML page? 
This is a web application, and as such it needs a proper HTML \nresponse. To do that you will create a simple template that says \"Hello World\"\nin a big green font.\n\nThe first step is to create a ``templates/index.html`` file that looks like this:\n\n.. literalinclude:: ex/ex50/gothonweb/templates/index.html\n :linenos:\n\nIf you know what HTML is then this should look fairly familiar. If not, research\nHTML and try writing a few web pages by hand so you know how it works. This HTML file\nhowever is a *template*, which means that ``lpthw.web`` will fill in \"holes\" in the text\ndepending on variables you pass in to the template. Every place you see ``$greeting`` will be\na variable you'll pass to the template that alters its contents.\n\nTo make your ``bin/app.py`` do this, you need to add some code to tell ``lpthw.web`` where to\nload the template and to render it. Take that file and change it like this:\n\n.. literalinclude:: ex/ex50/gothonweb/bin/app.py\n :linenos:\n\nPay close attention to the new ``render`` variable, and how I changed the last line of ``index.GET``\nso it returns ``render.index()`` passing in your ``greeting`` variable.\n\nOnce you have that in place, reload the web page in your browser and you should see a different message\nin green. You should also be able to do a ``View Source`` on the page in your browser to see that it is\nvalid HTML.\n\nThis may have flown by you very fast, so let me explain how a template works:\n\n1. In your ``bin/app.py`` you've added a new variable ``render`` which is a ``web.template.render`` object.\n2. This ``render`` object knows how to load ``.html`` files out of the ``templates/`` directory because\n you passed that to it as a parameter.\n3. Later in your code, when the browser hits the ``index.GET`` like before, instead of just returning\n the string ``greeting``, you call ``render.index`` and pass the greeting to it as a variable.\n4. 
This ``render.index`` method is kind of a *magic* function where the ``render`` object sees that\n you're asking for ``index``, goes into the ``templates/`` directory, looks for a page named ``index.html``,\n and then \"renders\" it, or converts it.\n5. In the ``templates/index.html`` file you see the beginning definition that says this template takes \n a ``greeting`` parameter, just like a function. Also, just like Python this template is indentation\n sensitive, so make sure you get them right.\n6. Finally, you have the HTML in ``templates/index.html`` that looks at the ``greeting`` variable, and\n if it's there, prints one message using the ``$greeting``, or a default message.\n\nTo get deeper into this, change the greeting variable and the HTML to see what effect it has.\nAlso create another template named ``templates/foo.html`` and render that using ``render.foo()``\ninstead of ``render.index()`` like before. This will show you how the name of the function you\ncall on ``render`` is just matched to a ``.html`` file in ``templates/``.\n\n\nExtra Credit\n============\n\n1. Read the documentation at http://webpy.org/ which is the same as the ``lpthw.web`` project.\n2. Experiment with everything you can find there, including their example code.\n3. Read about HTML5 and CSS3 and make some other .html and .css files for practice.\n4. 
If you have a friend who knows Django and is willing to help you, then consider doing\n Ex 50, 51, and 52 in Django instead to see what that's like.\n\n" }, { "alpha_fraction": 0.7183613777160645, "alphanum_fraction": 0.7236649394035339, "avg_line_length": 36.69655227661133, "blob_id": "79ca903bc32b5b1f9cd33cce7eb92c94d04916fe", "content_id": "fa34439e42cb0cc2e3990c8a0b2831a364cdbb5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5468, "license_type": "no_license", "max_line_length": 92, "num_lines": 145, "path": "/ex46.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 46: A Project Skeleton\n*******************************\n\nThis will be where you start learning how to setup a good project \"skeleton\"\ndirectory. This skeleton directory will have all the basics you need to get a\nnew project up and running. It will have your project layout, automated tests,\nmodules, and install scripts. When you go to make a new project, just copy\nthis directory to a new name and edit the files to get started.\n\nSkeleton Contents\n=================\n\nFirst, create the structure of your skeleton directory with these \ncommands:\n\n.. code-block:: console\n\n $ mkdir -p projects\n $ cd projects/\n $ mkdir skeleton\n $ cd skeleton\n $ mkdir bin NAME tests docs\n\nI use a directory named ``projects`` to store all the various things I'm\nworking on. Inside that directory I have my ``skeleton`` directory that\nI put the basis of my projects into. The directory ``NAME`` will be \nrenamed to whatever you are calling your project's main module when you \nuse the skeleton.\n\nNext we need to setup some initial \nfiles:\n\n\n.. code-block:: console\n\n $ touch NAME/__init__.py\n $ touch tests/__init__.py\n\nThat creates empty Python module directories we can put our code in.\nThen we need to create a ``setup.py`` file we can use to install our\nproject later if we want:\n\n\n.. 
literalinclude:: ex/ex46.py\n :linenos:\n\nEdit this file so that it has your contact information \nand is ready to go for when you copy it.\n\nFinally you will want a simple skeleton file for tests named ``tests/NAME_tests.py``:\n\n.. literalinclude:: ex/ex46_tests.py\n :linenos:\n\n\nInstalling Python Packages\n--------------------------\n\nMake sure you have some packages installed that makes\nthese things work. Here's the problem though. You are at a point where it's\ndifficult for me to help you do that and keep this book sane and clean. There\nare so many ways to install software on so many computers that I'd have to spend\n10 pages walking you through every step, and let me tell you I am a lazy guy.\n\nRather than tell you how to do it exactly, I'm going to tell you what you \nshould install, and then tell you to figure it out and get it working. This\nwill be really good for you since it will open a whole world of software\nyou can use that other people have released to the world.\n\nNext, install the following python packages:\n\n1. pip from http://pypi.python.org/pypi/pip\n2. distribute from http://pypi.python.org/pypi/distribute\n3. nose from http://pypi.python.org/pypi/nose/\n4. virtualenv from http://pypi.python.org/pypi/virtualenv\n\nDo not just download these packages and install them by hand. Instead see how \nother people recommend you install these packages and use them for your particular\nsystem. The process will be different for most versions of Linux, OSX, and definitely\ndifferent for Windows.\n\nI am warning you, this will be frustrating. In the business we call this \"yak shaving\".\nYak shaving is any activity that is mind numblingly irritatingly boring and tedious\nthat you have to do before you can do something else that's more fun. 
You want to\ncreate cool Python projects, but you can't do that until you setup a skeleton \ndirectory, but you can't setup a skeleton directory until you install some packages,\nbut you can't install packages until you install package installers, and you can't\ninstall package installers until you figure out how your system installs software\nin general, and so on.\n\nStruggle through this anyway. Consider it your trial-by-annoyance to get into\nthe programmer club. Every programmer has to do these annoying tedious tasks\nbefore they can do something cool.\n\nTesting Your Setup\n==================\n\nAfter you get all that installed you should be able to do this:\n\n.. code-block:: console\n\n $ nosetests\n .\n ----------------------------------------------------------------------\n Ran 1 test in 0.007s\n\n OK\n\nI'll explain what this ``nosetests`` thing is doing in the next exercise, but\nfor now if you do not see that, you probably got something wrong. Make sure\nyou put ``__init__.py`` files in your ``NAME`` and ``tests`` directory and make\nsure you got ``tests/NAME_tests.py`` right.\n\n\nUsing The Skeleton\n==================\n\nYou are now done with most of your yak shaving. Whenever you want to start a new\nproject, just do this:\n\n\n1. Make a copy of your skeleton directory. Name it after your new project.\n2. Rename (move) the NAME module to be the name of your project or whatever you want to call\n your root module.\n3. Edit your setup.py to have all the information for your project.\n4. Rename ``tests/NAME_tests.py`` to also have your module name.\n5. Double check it's all working using ``nosetests`` again.\n6. Start coding.\n\n\nRequired Quiz\n=============\n\nThis exercise doesn't have extra credit but a quiz you should complete:\n\n1. Read about how to use all of the things you installed.\n2. Read about the ``setup.py`` file and all it has to offer. Warning, it is not a very\n well-written piece of software, so it will be very strange to use.\n3. 
Make a project and start putting code into the module, then get the \n module working.\n4. Put a script in the ``bin`` directory that you can run. Read about how you can make\n a Python script that's runnable for your system.\n5. Mention the ``bin`` script you created in your ``setup.py`` so that it gets installed.\n6. Use your ``setup.py`` to install your own module and make sure it works, then use\n ``pip`` to uninstall it.\n\n\n" }, { "alpha_fraction": 0.6775777339935303, "alphanum_fraction": 0.6873977184295654, "avg_line_length": 15.972222328186035, "blob_id": "0fbfad857157d378e3f7cc26e2ec5985b55293e6", "content_id": "66337f431a5abeb9cdf07fa91824c925703dc40a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 53, "num_lines": 36, "path": "/cn/ex31.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 31: ไฝœๅ‡บๅ†ณๅฎš\n*****************************\n\n่ฟ™ๆœฌไนฆ็š„ไธŠๅŠ้ƒจๅˆ†ไฝ ๆ‰“ๅฐไบ†ไธ€ไบ›ไธœ่ฅฟ๏ผŒ่€Œไธ”่ฐƒ็”จไบ†ๅ‡ฝๆ•ฐ๏ผŒไธ่ฟ‡ไธ€ๅˆ‡้ƒฝๆ˜ฏ็›ด็บฟๅผ่ฟ›่กŒ็š„ใ€‚\\\nไฝ ็š„่„šๆœฌไปŽๆœ€ไธŠ้ขไธ€่กŒๅผ€ๅง‹๏ผŒไธ€่ทฏ่ฟ่กŒๅˆฐ็ป“ๆŸ๏ผŒไฝ†ๅ…ถไธญๅนถๆฒกๆœ‰ๅ†ณๅฎš็จ‹ๅบๆตๅ‘็š„ๅˆ†ๆ”ฏ็‚นใ€‚\\\n็Žฐๅœจไฝ ๅทฒ็ปๅญฆไบ† ``if``, ``else``, ๅ’Œ ``elif`` ๏ผŒไฝ ๅฐฑๅฏไปฅๅผ€ๅง‹ๅˆ›ๅปบๅŒ…ๅซๆกไปถๅˆคๆ–ญ\\\n็š„่„šๆœฌไบ†ใ€‚\n\nไธŠไธ€ไธช่„šๆœฌไธญไฝ ๅ†™ไบ†ไธ€็ณปๅˆ—็š„็ฎ€ๅ•ๆ้—ฎๆต‹่ฏ•ใ€‚่ฟ™่Š‚็š„่„šๆœฌไธญ๏ผŒไฝ ๅฐ†้œ€่ฆๅ‘็”จๆˆทๆ้—ฎ๏ผŒ\\\nไพๆฎ็”จๆˆท็š„็ญ”ๆกˆๆฅๅšๅ‡บๅ†ณๅฎšใ€‚ๆŠŠ่„šๆœฌๅ†™ไธ‹ๆฅ๏ผŒๅคšๅคš้ผ“ๆฃไธ€้˜ตๅญ๏ผŒ็œ‹็œ‹ๅฎƒ็š„ๅทฅไฝœๅŽŸ็†ๆ˜ฏ\\\nไป€ไนˆใ€‚\n\n.. 
literalinclude:: ex/ex31.py\n :linenos:\n\n่ฟ™้‡Œ็š„้‡็‚นๆ˜ฏไฝ ๅฏไปฅๅœจโ€œif ่ฏญๅฅโ€ๅ†…้ƒจๅ†ๆ”พไธ€ไธชโ€œif ่ฏญๅฅโ€ใ€‚่ฟ™ๆ˜ฏ\\\nไธ€ไธชๅพˆๅผบๅคง็š„ๅŠŸ่ƒฝ๏ผŒๅฏไปฅ็”จๆฅๅˆ›ๅปบๅตŒๅฅ—(nested)็š„ๅ†ณๅฎš๏ผŒๅ…ถไธญ็š„ไธ€ไธชๅˆ†ๆ”ฏๅฐ†ๅผ•ๅ‘ๅฆไธ€ไธช\\\nๅˆ†ๆ”ฏ็š„ๅญๅˆ†ๆ”ฏใ€‚\n\nไฝ ้œ€่ฆ็†่งฃ if ่ฏญๅฅ ๅŒ…ๅซ if ่ฏญๅฅ ็š„ๆฆ‚ๅฟตใ€‚ๅšไธ€ไธ‹ๅŠ ๅˆ†ไน ้ข˜๏ผŒ่ฟ™ๆ ทไฝ ไผš็กฎไฟก\\\n่‡ชๅทฑ็œŸๆญฃ็†่งฃไบ†ๅฎƒไปฌใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nๆˆ‘ๅœจ็Žฉไธ€ไธชๅฐๅ†’้™ฉๆธธๆˆ๏ผŒๆˆ‘็Žฉ็š„ๆฐดๅนณไธๆ€Žไนˆๅฅฝ๏ผš\n\n.. literalinclude:: ex/ex31.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\nไธบๆธธๆˆๆทปๅŠ ๆ–ฐ็š„้ƒจๅˆ†๏ผŒๆ”นๅ˜็Žฉๅฎถๅšๅ†ณๅฎš็š„ไฝ็ฝฎใ€‚ๅฐฝ่‡ชๅทฑ็š„่ƒฝๅŠ›ๆ‰ฉๅฑ•่ฟ™ไธชๆธธๆˆ๏ผŒไธ่ฟ‡ๅˆซๆŠŠๆธธๆˆๅผ„ๅพ—\\\nๅคชๆ€ชๅผ‚ไบ†ใ€‚\n" }, { "alpha_fraction": 0.6213151812553406, "alphanum_fraction": 0.6417233347892761, "avg_line_length": 28.066667556762695, "blob_id": "0b3a78089b86be5be1dfba4feb0c2bcaac971502", "content_id": "d0bf368aacd2c32bf961d56a1c3a3c273e7f011d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/cn/ex/ex52/gothonweb/tests/app_tests.py", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "from nose.tools import *\nfrom bin.app import app\nfrom tests.tools import assert_response\n\ndef test_index():\n # check that we get a 404 on the / URL\n resp = app.request(\"/\")\n assert_response(resp, status=\"303\")\n\n resp = app.request(\"/game\", method=\"GET\")\n\n # test that we get expected values\n data = {'action': 'dodge!'}\n resp = app.request(\"/game\", method=\"POST\", data=data)\n assert_response(resp, status=\"303\")\n \n" }, { "alpha_fraction": 0.7002924084663391, "alphanum_fraction": 0.7090643048286438, "avg_line_length": 34.96491241455078, "blob_id": "385a8df2ce44f08337052bfe9c75d892ddbc1a8a", "content_id": "c6bf641e2d006171177e0629302e0a0c15df86c3", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2052, "license_type": "no_license", "max_line_length": 99, "num_lines": 57, "path": "/ex4.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 4: Variables And Names\n*******************************\n\nNow you can print things with ``print`` and you can do math. The next step is\nto learn about variables. In programming a variable is nothing more than a\nname for something so you can use the name rather than the something as you\ncode. Programmers use these variable names to make their code read more like\nEnglish, and because they have lousy memories. If they didn't use good names\nfor things in their software, they'd get lost when they tried to read their\ncode again.\n\nIf you get stuck with this exercise, remember the tricks you have been taught\nso far of finding differences and focusing on details:\n\n1. Write a comment above each line explaining to yourself what it does in English.\n2. Read your ``.py`` file backwards.\n3. Read your ``.py`` file out loud saying even the characters.\n\n\n.. literalinclude:: ex/ex4.py\n :linenos:\n\n.. note::\n\n The ``_`` in ``space_in_a_car`` is called an ``underscore character``. Find out how to type it\n if you do not already know. We use this character a lot to put an imaginary space between\n words in variable names.\n\nWhat You Should See\n===================\n\n\n.. literalinclude:: ex/ex4.txt\n :language: console\n\n\nExtra Credit\n============\n\nWhen I wrote this program the first time I had a mistake, and *python* told me\nabout it like this:\n\n.. literalinclude:: ex/ex4.err\n\nExplain this error in your own words. Make sure you use line numbers and\nexplain why.\n\nHere's more extra credit:\n\n1. I used 4.0 for ``space_in_a_car``, but is that necessary? What happens if it's\n just 4?\n2. Remember that 4.0 is a \"floating point\" number. Find out what that means.\n3. 
Write comments above each of the variable assignments.\n4. Make sure you know what ``=`` is called (equals) and that it's making names for things.\n5. Remember ``_`` is an underscore character.\n6. Try running ``python`` as a calculator like you did before and use variable names\n to do your calculations. Popular variable names are also ``i``, ``x``, and ``j``.\n\n\n" }, { "alpha_fraction": 0.6681184768676758, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 21.509803771972656, "blob_id": "cbc9d78c94a8f654340c0f5e2403599019301c9d", "content_id": "f2fa348ce72760991fa4a9fd3a9bacc0b676d93e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2440, "license_type": "no_license", "max_line_length": 57, "num_lines": 51, "path": "/cn/ex32.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 32: ๅพช็Žฏๅ’Œๅˆ—่กจ\n*************************\n\n็Žฐๅœจไฝ ๅบ”่ฏฅๆœ‰่ƒฝๅŠ›ๅ†™ๆ›ดๆœ‰่ถฃ็š„็จ‹ๅบๅ‡บๆฅไบ†ใ€‚ๅฆ‚ๆžœไฝ ่ƒฝไธ€็›ด่ทŸๅพ—ไธŠ๏ผŒไฝ ๅบ”่ฏฅๅทฒ็ป็œ‹ๅ‡บ\\\nๅฐ†โ€œif ่ฏญๅฅโ€ๅ’Œโ€œๅธƒๅฐ”่กจ่พพๅผโ€็ป“ๅˆ่ตทๆฅๅฏไปฅ่ฎฉ็จ‹ๅบไฝœๅ‡บไธ€ไบ›ๆ™บ่ƒฝๅŒ–็š„ไบ‹ๆƒ…ใ€‚\n\n็„ถ่€Œ๏ผŒๆˆ‘ไปฌ็š„็จ‹ๅบ่ฟ˜้œ€่ฆ่ƒฝๅพˆๅฟซๅœฐๅฎŒๆˆ้‡ๅค็š„ไบ‹ๆƒ…ใ€‚่ฟ™่Š‚ไน ้ข˜ไธญๆˆ‘ไปฌๅฐ†ไฝฟ็”จ ``for-loop`` \n๏ผˆfor ๅพช็Žฏ๏ผ‰ๆฅๅˆ›ๅปบๅ’Œๆ‰“ๅฐๅ‡บๅ„็งๅ„ๆ ท็š„ๅˆ—่กจใ€‚ๅœจๅš็š„่ฟ‡็จ‹ไธญ๏ผŒไฝ ไผš้€ๆธๆ˜Ž็™ฝๅฎƒไปฌๆ˜ฏๆ€Žไนˆ\\\nๅ›žไบ‹ใ€‚็Žฐๅœจๆˆ‘ไธไผšๅ‘Š่ฏ‰ไฝ ๏ผŒไฝ ้œ€่ฆ่‡ชๅทฑๆ‰พๅˆฐ็ญ”ๆกˆใ€‚\n\nๅœจไฝ ๅผ€ๅง‹ไฝฟ็”จ for ๅพช็Žฏไน‹ๅ‰๏ผŒไฝ ้œ€่ฆๅœจๆŸไธชไฝ็ฝฎๅญ˜ๆ”พๅพช็Žฏ็š„็ป“ๆžœใ€‚ๆœ€ๅฅฝ็š„ๆ–นๆณ•ๆ˜ฏไฝฟ็”จๅˆ—่กจ(list)๏ผŒ\\\n้กพๅๆ€ไน‰๏ผŒๅฎƒๅฐฑๆ˜ฏไธ€ไธชๆŒ‰้กบๅบๅญ˜ๆ”พไธœ่ฅฟ็š„ๅฎนๅ™จใ€‚ๅˆ—่กจๅนถไธๅคๆ‚๏ผŒไฝ ๅชๆ˜ฏ่ฆๅญฆไน \\\nไธ€็‚นๆ–ฐ็š„่ฏญๆณ•ใ€‚้ฆ–ๅ…ˆๆˆ‘ไปฌ็œ‹็œ‹ๅฆ‚ไฝ•ๅˆ›ๅปบๅˆ—่กจ๏ผš\n\n.. 
code-block:: python\n \n hairs = ['brown', 'blond', 'red']\n eyes = ['brown', 'blue', 'green']\n weights = [1, 2, 3, 4]\n\nไฝ ่ฆๅš็š„ๆ˜ฏไปฅ ``[`` ๏ผˆๅทฆๆ–นๆ‹ฌๅท๏ผ‰ๅผ€ๅคดโ€œๆ‰“ๅผ€โ€ๅˆ—่กจ๏ผŒ็„ถๅŽๅ†™ไธ‹ไฝ ่ฆๆ”พๅ…ฅๅˆ—่กจ็š„ไธœ่ฅฟ๏ผŒ\\\n็”จ้€—ๅท้š”ๅผ€๏ผŒๅฐฑ่ทŸๅ‡ฝๆ•ฐ็š„ๅ‚ๆ•ฐไธ€ๆ ท๏ผŒๆœ€ๅŽไฝ ้œ€่ฆ็”จ ``]`` ๏ผˆๅณๆ–นๆ‹ฌๅท๏ผ‰็ป“ๆŸๅณๆ–นๆ‹ฌๅท\\\n็š„ๅฎšไน‰ใ€‚็„ถๅŽ Python ๆŽฅๆ”ถ่ฟ™ไธชๅˆ—่กจไปฅๅŠ้‡Œ่พนๆ‰€ๆœ‰็š„ๅ†…ๅฎน๏ผŒๅฐ†ๅ…ถ่ต‹็ป™ไธ€ไธชๅ˜้‡ใ€‚\n\n.. warning::\n\n ๅฏนไบŽไธไผš็ผ–็จ‹็š„ไบบๆฅ่ฏด่ฟ™ๆ˜ฏไธ€ไธช้šพ็‚นใ€‚ไน ๆƒฏๆ€งๆ€็ปดๅ‘Š่ฏ‰ไฝ ็š„ๅคง่„‘ๅคงๅœฐๆ˜ฏๅนณ็š„ใ€‚\\\n ่ฎฐๅพ—ไธŠไธ€ไธช็ปƒไน ไธญ็š„ if ่ฏญๅฅๅตŒๅฅ—ๅง๏ผŒไฝ ๅฏ่ƒฝ่ง‰ๅพ—่ฆ็†่งฃๅฎƒๆœ‰ไบ›้šพๅบฆ๏ผŒๅ› ไธบ\\\n ็”Ÿๆดปไธญไธ€่ˆฌไบบไธไผšๅŽปๅƒ่ฟ™ๆ ท็š„้—ฎ้ข˜๏ผŒไฝ†่ฟ™ๆ ท็š„้—ฎ้ข˜ๅœจ็ผ–็จ‹ไธญๅ‡ ไนŽๅˆฐๅค„้ƒฝๆ˜ฏใ€‚ไฝ ไผš็œ‹ๅˆฐ\\\n ไธ€ไธชๅ‡ฝๆ•ฐ่ฐƒ็”จๅฆๅค–ไธ€ไธชๅŒ…ๅซ if ่ฏญๅฅ็š„ๅ‡ฝๆ•ฐ๏ผŒๅ…ถไธญๅˆๆœ‰ๅตŒๅฅ—ๅˆ—่กจ็š„ๅˆ—่กจใ€‚ๅฆ‚ๆžœไฝ ็œ‹ๅˆฐ\\\n ่ฟ™ๆ ท็š„ไธœ่ฅฟไธ€ๆ—ถๆ— ๆณ•ๅผ„ๆ‡‚๏ผŒๅฐฑ็”จ็บธๅธ่ฎฐไธ‹ๆฅ๏ผŒๆ‰‹ๅŠจๅˆ†ๅ‰ฒไธ‹ๅŽป๏ผŒ็›ดๅˆฐๅผ„ๆ‡‚ไธบๆญขใ€‚\n\n็Žฐๅœจๆˆ‘ไปฌๅฐ†ไฝฟ็”จๅพช็Žฏๅˆ›ๅปบไธ€ไบ›ๅˆ—่กจ๏ผŒ็„ถๅŽๅฐ†ๅฎƒไปฌๆ‰“ๅฐๅ‡บๆฅใ€‚\n\n.. literalinclude:: ex/ex32.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex32.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๆณจๆ„ไธ€ไธ‹ ``range`` ็š„็”จๆณ•ใ€‚ๆŸฅไธ€ไธ‹ ``range`` ๅ‡ฝๆ•ฐๅนถ็†่งฃๅฎƒใ€‚\n2. ๅœจ็ฌฌ 23 ่กŒ๏ผŒไฝ ๅฏไปฅๅฏไปฅ็›ดๆŽฅๅฐ† elements ่ต‹ๅ€ผไธบ range(0,6)๏ผŒ่€Œๆ— ้œ€ไฝฟ็”จ for ๅพช็Žฏ๏ผŸ\n3. 
ๅœจ Python ๆ–‡ๆกฃไธญๆ‰พๅˆฐๅ…ณไบŽๅˆ—่กจ็š„ๅ†…ๅฎน๏ผŒไป”็ป†้˜…่ฏปไปฅไธ‹๏ผŒ้™คไบ† ``append`` ไปฅๅค–ๅˆ—่กจ่ฟ˜\\\n ๆ”ฏๆŒๅ“ชไบ›ๆ“ไฝœ๏ผŸ\n" }, { "alpha_fraction": 0.6877551078796387, "alphanum_fraction": 0.6959183812141418, "avg_line_length": 36.61538314819336, "blob_id": "3372a061f6889ac6a35ac870156c3ddfb48553e0", "content_id": "e03e7d39b3f8a599fbcc68631a0e05b4c762a533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 980, "license_type": "no_license", "max_line_length": 158, "num_lines": 26, "path": "/ex2.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 2: Comments And Pound Characters\n*****************************************\n\nComments are very important in your programs. They are used to tell you\nwhat something does in English, and they also are used to disable parts\nof your program if you need to remove them temporarily. Here's how you\nuse comments in Python:\n\n.. literalinclude:: ex/ex2.py\n :language: python\n :linenos:\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex2.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Find out if you were right about what the # character does and make sure you know what it's called (octothorpe or pound character).\n2. Take your ``ex2.py`` file and review each line going backwards. Start at the last line, and check each word in reverse against what you should have typed.\n3. Did you find more mistakes? Fix them.\n4. Read what you typed above out loud, including saying each character by its name. Did you find more mistakes? 
Fix them.\n\n\n" }, { "alpha_fraction": 0.7012743353843689, "alphanum_fraction": 0.708395779132843, "avg_line_length": 44.186439514160156, "blob_id": "36cc7c58b2c598650b22757a536d92768e5204ca", "content_id": "99a64fc7a95b968475d0701b6589c3ad3578847d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2668, "license_type": "no_license", "max_line_length": 88, "num_lines": 59, "path": "/ex30.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 30: Else And If\n*************************\n\nIn the last exercise you worked out some ``if-statements``, and then tried to\nguess what they are and how they work. Before you learn more I'll explain what\neverything is by answering the questions you had from extra credit. You did\nthe extra credit right?\n\n\n1. What do you think the ``if`` does to the code under it? An if statement\n creates what is called a \"branch\" in the code. It's kind of like those\n choose your own adventure books where you are asked to turn to one page if\n you make one choice, and another if you go a different direction. The\n ``if-statement`` tells your script, \"If this boolean expression is True,\n then run the code under it, otherwise skip it.\"\n2. Why does the code under the ``if`` need to be indented 4 spaces?\n A colon at the end of a line is how you tell Python you are going to \n create a new \"block\" of code, and then indenting 4 spaces tells Python\n what lines of code are in that block. This is *exactly* the same thing\n you did when you made functions in the first half of the book.\n3. What happens if it isn't indented?\n If it isn't indented, you will most likely create a Python error. Python\n expects you to indent *something* after you end a line with a ``:`` (colon).\n4. Can you put other boolean expressions from Ex. 27 in the ``if`` statement? 
Try it.\n Yes you can, and they can be as complex as you like, although really complex \n things generally are bad style.\n5. What happens if you change the initial values for ``people``, ``cats``,\n and ``dogs``? Because you are comparing numbers, if you change the numbers,\n different ``if-statements`` will evaluate to ``True`` and the blocks of code\n under them will run. Go back and put different numbers in and see if you\n can figure out in your head what blocks of code will run.\n\n\nCompare my answers to your answers, and make sure you *really* understand the concept of\na \"block\" of code. This is important for when you do the next exercise where you write\nall the parts of ``if-statements`` that you can use.\n\n\nType this one in and make it work too.\n\n.. literalinclude:: ex/ex30.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex30.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Try to guess what ``elif`` and ``else`` are doing.\n2. Change the numbers of ``cars``, ``people``, and ``buses`` and then trace\n through each ``if-statement`` to see what will be printed.\n3. Try some more complex boolean expressions like ``cars > people and buses < cars``.\n4. 
Above each line write an English description of what the line does.\n\n\n" }, { "alpha_fraction": 0.6987043619155884, "alphanum_fraction": 0.7069493532180786, "avg_line_length": 41.439998626708984, "blob_id": "46a2cbda8084fb3f5c2bad7d59488488409b9b5a", "content_id": "49cf8a81e9d8785d2c3ba9018ae9273fdfc74fcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4245, "license_type": "no_license", "max_line_length": 102, "num_lines": 100, "path": "/ex41.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 41: Gothons From Planet Percal #25\n*******************************************\n\nDid you figure out the secret of the function in the dict from the\nlast exercise? Can you explain it to yourself? I'll explain it\nand you can compare your explanation with mine. Here are the lines of\ncode we are talking about:\n\n.. code-block:: python\n \n cities['_find'] = find_city\n city_found = cities['_find'](cities, state)\n\nRemember that functions can be variables too. The ``def find_city`` just makes\nanother variable name in your current module that you can use anywhere. In\nthis code first we are putting the function ``find_city`` into the dict ``cities``\nas ``'_find'``. This is the same as all the others where we set states to some\ncities, but in this case it's actually the function.\n\nAlright, so once we know that ``find_city`` is in the dict at ``_find``, that means\nwe can do work with it. The 2nd line of code (used later in the previous \nexercise) can be broken down like this:\n\n1. Python sees ``city_found =`` and knows we want to make a new variable.\n2. It then reads ``cities`` and finds that variable, it's a dict.\n3. Then there's ``['_find']`` which will *index* into the ``cities`` dict and pull\n out whatever is at ``_find``.\n4. 
What is at ``['_find']`` is our function ``find_city`` so Python *then* knows\n it's got a function, and when it hits ``(`` it does the function call.\n5. The parameters ``cities, state`` are passed to this function ``find_city``, and\n it runs because it's called.\n6. ``find_city`` then tries to look up ``states`` inside ``cities``, and returns\n what it finds or a message saying it didn't find anything.\n7. Python takes what ``find_city`` returned, and *finally* that is what is assigned\n to ``city_found`` all the way at the beginning.\n\nHere's a trick. Sometimes these things read better in English\nif you read the code backwards. This is how I would do\nit for that same line (remember *backwards*):\n\n1. ``state`` and ``city`` are...\n2. passed as parameters to...\n3. a function at...\n4. ``'_find'`` inside...\n5. the dict ``cities``...\n6. and finally assigned to ``city_found``.\n\nHere's another way to read it, this time \"inside-out\".\n\n1. Find the center item of the expression, in this case ``['_find']``.\n2. Go counter-clock-wise and you have a dict ``cities``, so this finds the element\n ``_find`` in cities.\n3. That gives us a function. Keep going counter-clock-wise and you get to the parameters.\n4. The parameters are passed to the function, and that returns a result. Go counter-clock-wise again.\n5. Finally, we are at the ``city_found =`` assignment, and we have our end result.\n\nAfter decades of programming I don't even think about these three ways to read\ncode. I just glance at it and know what it means. I can even glance at a whole\nscreen of code, and all the bugs and errors jump out at me. That took an incredibly\nlong time and quite a bit more study than is sane. To get that way, I learned these\nthree ways of reading most any programming language:\n\n1. Front to back.\n2. Back to front.\n3. 
Counter-clock-wise.\n\nTry them out when you have a difficult statement to figure out.\n\nNow type in your next exercise, then go over it.\nThis one is gonna be fun.\n\n.. literalinclude:: ex/ex41.py\n :linenos:\n\nIt's a lot of code, but go through it, make sure it works, play it.\n\n\nWhat You Should See\n===================\n\nHere's me playing the game.\n\n.. literalinclude:: ex/ex41.txt\n :language: console\n\n\n\nExtra Credit\n============\n\n1. Explain how returning the next room works.\n2. Add cheat codes to the game so you can get past the more difficult rooms.\n3. Instead of having each function print itself, learn about \"doc string\" style comments.\n Write the room description as doc comments, and change the runner to print them.\n4. Once you have doc comments as the room description, do you need to have the\n function prompt even? Have the runner prompt the user, and pass that in to\n each function. Your functions should just be if-statements printing the\n result and returning the next room.\n5. This is actually a small version of something called a \"finite state machine\".\n Read about them. 
They might not make sense but try anyway.\n\n" }, { "alpha_fraction": 0.6257796287536621, "alphanum_fraction": 0.6424116492271423, "avg_line_length": 19.869565963745117, "blob_id": "45f36ee37978ebfd3488f6f455acfa42cf495eb9", "content_id": "709c6c3f8973fe4e3b7ea6a5a30dcdce3c01a03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 937, "license_type": "no_license", "max_line_length": 66, "num_lines": 23, "path": "/cn/ex2.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 2: ๆณจ้‡Šๅ’Œไบ•ๅท\n*****************************************\n\n็จ‹ๅบ้‡Œ็š„ๆณจ้‡Šๆ˜ฏๅพˆ้‡่ฆ็š„ใ€‚ๅฎƒไปฌๅฏไปฅ็”จ่‡ช็„ถ่ฏญ่จ€ๅ‘Š่ฏ‰ไฝ ๆŸๆฎตไปฃ็ ็š„ๅŠŸ่ƒฝๆ˜ฏไป€ไนˆใ€‚ๅœจไฝ ๆƒณ่ฆไธดๆ—ถ็งป้™คไธ€ๆฎต\\\nไปฃ็ ๆ—ถ๏ผŒไฝ ่ฟ˜ๅฏไปฅ็”จๆณจ่งฃ็š„ๆ–นๅผๅฐ†่ฟ™ๆฎตไปฃ็ ไธดๆ—ถ็ฆ็”จใ€‚ๆŽฅไธ‹ๆฅ็š„็ปƒไน ๅฐ†่ฎฉไฝ ๅญฆไผšๆณจ้‡Š:\n\n.. literalinclude:: ex/ex2.py\n :language: python\n :linenos:\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex2.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅผ„ๆธ…ๆฅš\"#\"็ฌฆๅท็š„ไฝœ็”จใ€‚่€Œไธ”่ฎฐไฝๅฎƒ็š„ๅๅญ—ใ€‚(ไธญๆ–‡ไธบไบ•ๅท๏ผŒ่‹ฑๆ–‡ไธบ octothorpe ๆˆ–่€… pound character)ใ€‚\n2. ๆ‰“ๅผ€ไฝ ็š„ ``ex2.py`` ๆ–‡ไปถ๏ผŒไปŽๅŽๅพ€ๅ‰้€่กŒๆฃ€ๆŸฅใ€‚ไปŽๆœ€ๅŽไธ€่กŒๅผ€ๅง‹๏ผŒๅ€’็€้€ไธชๅ•่ฏๅ•่ฏๆฃ€ๆŸฅๅ›žๅŽปใ€‚\n3. ๆœ‰ๆฒกๆœ‰ๅ‘็Žฐไป€ไนˆ้”™่ฏฏๅ‘ข๏ผŸๆœ‰็š„่ฏๅฐฑๆ”นๆญฃ่ฟ‡ๆฅ.\n4. 
ๆœ—่ฏปไฝ ๅ†™็š„ไน ้ข˜๏ผŒๆŠŠๆฏไธชๅญ—็ฌฆ้ƒฝ่ฏปๅ‡บๆฅใ€‚ๆœ‰ๆฒกๆœ‰ๅ‘็Žฐๆ›ดๅคš็š„้”™่ฏฏๅ‘ข๏ผŸๆœ‰็š„่ฏไนŸไธ€ๆ ทๆ”นๆญฃ่ฟ‡ๆฅใ€‚\n\n" }, { "alpha_fraction": 0.6853658556938171, "alphanum_fraction": 0.7016260027885437, "avg_line_length": 22.653846740722656, "blob_id": "4c2c00e55ac4f1f28167b1354ba1c49339698d40", "content_id": "29b43de559f449930cfbb7bee568ed5308d37f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2656, "license_type": "no_license", "max_line_length": 58, "num_lines": 52, "path": "/cn/ex33.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 33: While ๅพช็Žฏ\n************************\n\nๆŽฅไธ‹ๆฅๆ˜ฏไธ€ไธชๆ›ดๅœจไฝ ๆ„ๆ–™ไน‹ๅค–็š„ๆฆ‚ๅฟต๏ผš ``while-loop``๏ผˆwhile ๅพช็Žฏ๏ผ‰ใ€‚``while-loop``\nไผšไธ€็›ดๆ‰ง่กŒๅฎƒไธ‹้ข็š„ไปฃ็ ็‰‡ๆฎต๏ผŒ็›ดๅˆฐๅฎƒๅฏนๅบ”็š„ๅธƒๅฐ”่กจ่พพๅผไธบ ``False`` ๆ—ถๆ‰ไผšๅœไธ‹ๆฅใ€‚ \n\n็ญ‰็ญ‰๏ผŒไฝ ่ฟ˜่ƒฝ่ทŸๅพ—ไธŠ่ฟ™ไบ›ๆœฏ่ฏญๅง๏ผŸๅฆ‚ๆžœไฝ ็š„ๆŸไธ€่กŒๆ˜ฏไปฅ ``:`` ๏ผˆๅ†’ๅท, colon๏ผ‰็ป“ๅฐพ๏ผŒ\\\n้‚ฃๅฐฑๆ„ๅ‘ณ็€ๆŽฅไธ‹ๆฅ็š„ๅ†…ๅฎนๆ˜ฏไธ€ไธชๆ–ฐ็š„ไปฃ็ ็‰‡ๆฎต๏ผŒๆ–ฐ็š„ไปฃ็ ็‰‡ๆฎตๆ˜ฏ้œ€่ฆ่ขซ็ผฉ่ฟ›็š„ใ€‚ๅชๆœ‰ๅฐ†\\\nไปฃ็ ็”จ่ฟ™ๆ ท็š„ๆ–นๅผๆ ผๅผๅŒ–๏ผŒPython ๆ‰่ƒฝ็Ÿฅ้“ไฝ ็š„็›ฎ็š„ใ€‚ๅฆ‚ๆžœไฝ ไธๅคชๆ˜Ž็™ฝ่ฟ™ไธ€็‚น๏ผŒๅฐฑๅ›žๅŽป\\\n็œ‹็œ‹โ€œif ่ฏญๅฅโ€ๅ’Œโ€œๅ‡ฝๆ•ฐโ€็š„็ซ ่Š‚๏ผŒ็›ดๅˆฐไฝ ๆ˜Ž็™ฝไธบๆญขใ€‚\n\nๆŽฅไธ‹ๆฅ็š„็ปƒไน ๅฐ†่ฎญ็ปƒไฝ ็š„ๅคง่„‘ๅŽป้˜…่ฏป่ฟ™ไบ›็ป“ๆž„ๅŒ–็š„ไปฃ็ ใ€‚่ฟ™ๅ’Œๆˆ‘ไปฌๅฐ†ๅธƒๅฐ”่กจ่พพๅผ็ƒงๅฝ•\\\nๅˆฐไฝ ็š„ๅคง่„‘ไธญ็š„่ฟ‡็จ‹ๆœ‰็‚น็ฑปไผผใ€‚\n\nๅ›žๅˆฐ while ๅพช็Žฏ๏ผŒๅฎƒๆ‰€ไฝœ็š„ๅ’Œ if ่ฏญๅฅ็ฑปไผผ๏ผŒไนŸๆ˜ฏๅŽปๆฃ€ๆŸฅไธ€ไธชๅธƒๅฐ”่กจ่พพๅผ็š„็œŸๅ‡๏ผŒไธ\\\nไธ€ๆ ท็š„ๆ˜ฏๅฎƒไธ‹้ข็š„ไปฃ็ ็‰‡ๆฎตไธๆ˜ฏๅช่ขซๆ‰ง่กŒไธ€ๆฌก๏ผŒ่€Œๆ˜ฏๆ‰ง่กŒๅฎŒๅŽๅ†่ฐƒๅ›žๅˆฐ ``while`` ๆ‰€ๅœจ\\\n็š„ไฝ็ฝฎ๏ผŒๅฆ‚ๆญค้‡ๅค่ฟ›่กŒ๏ผŒ็›ดๅˆฐ while ่กจ่พพๅผไธบ ``False`` ไธบๆญขใ€‚\n\nWhile ๅพช็Žฏๆœ‰ไธ€ไธช้—ฎ้ข˜๏ผŒ้‚ฃๅฐฑๆ˜ฏๆœ‰ๆ—ถๅฎƒไผšๆฐธไธ็ป“ๆŸใ€‚ๅฆ‚ๆžœไฝ ็š„็›ฎ็š„ๆ˜ฏๅพช็Žฏๅˆฐๅฎ‡ๅฎ™ๆฏ็ญไธบๆญข๏ผŒ\\\n้‚ฃ่ฟ™ๆ ทไนŸๆŒบๅฅฝ็š„๏ผŒไธ่ฟ‡ๅ…ถไป–็š„ๆƒ…ๅ†ตไธ‹ไฝ 
็š„ๅพช็Žฏๆ€ป้œ€่ฆๆœ‰ไธ€ไธช็ป“ๆŸ็‚นใ€‚\n\nไธบไบ†้ฟๅ…่ฟ™ๆ ท็š„้—ฎ้ข˜๏ผŒไฝ ้œ€่ฆ้ตๅพชไธ‹้ข็š„่ง„ๅฎš๏ผš\n\n1. ๅฐฝ้‡ๅฐ‘็”จ ``while-loop``\\๏ผŒๅคง้ƒจๅˆ†ๆ—ถๅ€™ for-loop ๆ˜ฏๆ›ดๅฅฝ็š„้€‰ๆ‹ฉใ€‚\n2. ้‡ๅคๆฃ€ๆŸฅไฝ ็š„ while ่ฏญๅฅ๏ผŒ็กฎๅฎšไฝ ๆต‹่ฏ•็š„ๅธƒๅฐ”่กจ่พพๅผๆœ€็ปˆไผšๅ˜ๆˆ ``False`` ใ€‚\n3. ๅฆ‚ๆžœไธ็กฎๅฎš๏ผŒๅฐฑๅœจ ``while-loop`` ็š„็ป“ๅฐพๆ‰“ๅฐๅ‡บไฝ ่ฆๆต‹่ฏ•็š„ๅ€ผใ€‚็œ‹็œ‹ๅฎƒ็š„ๅ˜ๅŒ–ใ€‚\n\nๅœจ่ฟ™่Š‚็ปƒไน ไธญ๏ผŒไฝ ๅฐ†้€š่ฟ‡ไธŠ้ข็š„ไธ‰ๆ ทไบ‹ๆƒ…ๅญฆไผš ``while-loop`` ๏ผš\n\n.. literalinclude:: ex/ex33.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex33.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅฐ†่ฟ™ไธช while ๅพช็Žฏๆ”นๆˆไธ€ไธชๅ‡ฝๆ•ฐ๏ผŒๅฐ†ๆต‹่ฏ•ๆกไปถ(``i < 10``)ไธญ็š„ ``10`` ๆขๆˆไธ€ไธชๅ˜้‡ใ€‚\n2. ไฝฟ็”จ่ฟ™ไธชๅ‡ฝๆ•ฐ้‡ๅ†™ไฝ ็š„่„šๆœฌ๏ผŒๅนถ็”จไธๅŒ็š„ๆ•ฐๅญ—่ฟ›่กŒๆต‹่ฏ•ใ€‚\n3. ไธบๅ‡ฝๆ•ฐๆทปๅŠ ๅฆๅค–ไธ€ไธชๅ‚ๆ•ฐ๏ผŒ่ฟ™ไธชๅ‚ๆ•ฐ็”จๆฅๅฎšไน‰็ฌฌ 8 ่กŒ็š„ๅŠ ๅ€ผ ``+ 1`` ๏ผŒ่ฟ™ๆ ทไฝ ๅฐฑๅฏไปฅ\\\n ่ฎฉๅฎƒไปปๆ„ๅŠ ๅ€ผไบ†ใ€‚\n4. ๅ†ไฝฟ็”จ่ฏฅๅ‡ฝๆ•ฐ้‡ๅ†™ไธ€้่ฟ™ไธช่„šๆœฌใ€‚็œ‹็œ‹ๆ•ˆๆžœๅฆ‚ไฝ•ใ€‚\n5. 
ๆŽฅไธ‹ๆฅไฝฟ็”จ ``for-loop`` ๅ’Œ ``range`` ๆŠŠ่ฟ™ไธช่„šๆœฌๅ†ๅ†™ไธ€้ใ€‚ไฝ ่ฟ˜้œ€่ฆไธญ้—ด็š„ๅŠ ๅ€ผๆ“ไฝœๅ—๏ผŸ\\\n ๅฆ‚ๆžœไฝ ไธๅŽปๆމๅฎƒ๏ผŒไผšๆœ‰ไป€ไนˆๆ ท็š„็ป“ๆžœ๏ผŸ\n \nๅพˆๆœ‰ๅฏ่ƒฝไฝ ไผš็ขฐๅˆฐ็จ‹ๅบ่ท‘็€ๅœไธไธ‹ๆฅไบ†๏ผŒ่ฟ™ๆ—ถไฝ ๅช่ฆๆŒ‰็€ ``CTRL`` ๅ†ๆ•ฒ ``c`` (CTRL-c)๏ผŒ\\\n่ฟ™ๆ ท็จ‹ๅบๅฐฑไผšไธญๆ–ญไธ‹ๆฅไบ†ใ€‚\n" }, { "alpha_fraction": 0.5650297999382019, "alphanum_fraction": 0.5695500373840332, "avg_line_length": 29.030864715576172, "blob_id": "84e8003e162cd66ca5ab314e94e26c2b921a74dc", "content_id": "1ab17ad0fed7578071d3023906d30e8d7fd4aed5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4867, "license_type": "no_license", "max_line_length": 104, "num_lines": 162, "path": "/ex27.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 27: Memorizing Logic \n*****************************\n\nToday is the day you start learning about logic. Up to this point you have done \neverything you possibly can reading and writing files, to the terminal, and have\nlearned quite a lot of the math capabilities of Python.\n\nFrom now on, you will be learning *logic*. You won't learn complex theories\nthat academics love to study, but just the simple basic logic that\nmakes real programs work and that real programmers need every day.\n\nLearning logic has to come after you do some memorization. I want you\nto do this exercise for an entire week. Do not falter. Even if you are bored\nout of your mind, keep doing it. This exercise has a set of logic tables you\nmust memorize to make it easier for you to do the later exercises.\n\nI'm warning you this won't be fun at first. It will be downright boring and tedious\nbut this is to teach you a very important skill you will need as a programmer. You\n*will* need to be able to memorize important concepts as you go in your life.\nMost of these concepts will be exciting once you get them. 
You will struggle with\nthem, like wrestling a squid, then one day *snap* you will understand it. All\nthat work memorizing the basics pays off big later.\n\nHere's a tip on how to memorize something without going insane: Do a tiny bit\nat a time throughout the day and mark down what you need to work on most. Do\nnot try to sit down for two hours straight and memorize these tables.\nThis won't work. Your brain will really only retain whatever you studied in the\nfirst 15 or 30 minutes anyway.\n \nInstead, what you should do is create a bunch of index cards with each column\non the left on one side (True or False) and the column on the right on the back.\nYou should then pull them out, see the \"True or False\" and be able to immediately\nsay \"True!\" Keep practicing until you can do this.\n\nOnce you can do that, start writing out your own truth tables each night into a\nnotebook. Do not just copy them. Try to do them from memory, and when you get\nstuck glance quickly at the ones I have here to refresh your memory. Doing\nthis will train your brain to remember the whole table.\n\nDo not spend more than one week on this, because you will be applying it\nas you go.\n\n\nThe Truth Terms\n===============\n\nIn python we have the following terms (characters and phrases) for determining\nif something is \"True\" or \"False\". 
Logic on a computer is all about\nseeing if some combination of these characters and some variables is True\nat that point in the program.\n\n* ``and``\n* ``or``\n* ``not``\n* ``!=`` (not equal)\n* ``==`` (equal)\n* ``>=`` (greater-than-equal)\n* ``<=`` (less-than-equal)\n* True\n* False\n\nYou actually have run into these characters before, but maybe not the phrases.\nThe phrases (and, or, not) actually work the way you expect them to, just like\nin English.\n\n\nThe Truth Tables\n================\n\nWe will now use these characters to make the truth tables you need to memorize.\n\n\n========= =====\n NOT True?\n========= =====\nnot False True\n--------- -----\nnot True False\n========= =====\n\n\n============== =====\n OR True?\n============== =====\nTrue or False True\n-------------- -----\nTrue or True True\n-------------- -----\nFalse or True True\n-------------- -----\nFalse or False False\n============== =====\n\n\n=============== =====\n AND True?\n=============== =====\nTrue and False False\n--------------- -----\nTrue and True True\n--------------- -----\nFalse and True False\n--------------- -----\nFalse and False False\n=============== =====\n\n\n\n==================== =====\n NOT OR True?\n==================== =====\nnot (True or False) False\n-------------------- -----\nnot (True or True) False\n-------------------- -----\nnot (False or True) False\n-------------------- -----\nnot (False or False) True\n==================== =====\n\n\n===================== =====\n NOT AND True?\n===================== =====\nnot (True and False) True\n--------------------- -----\nnot (True and True) False\n--------------------- -----\nnot (False and True) True\n--------------------- -----\nnot (False and False) True\n===================== =====\n\n\n=============== =====\n != True?\n=============== =====\n1 != 0 True\n--------------- -----\n1 != 1 False\n--------------- -----\n0 != 1 True\n--------------- -----\n0 != 0 False\n=============== =====\n\n\n=============== 
=====\n == True?\n=============== =====\n1 == 0 False\n--------------- -----\n1 == 1 True\n--------------- -----\n0 == 1 False\n--------------- -----\n0 == 0 True\n=============== =====\n\n\nNow use these tables to write up your own cards and spend the week memorizing them. Remember though,\nthere is no failing in this book, just trying as hard as you can each day, and then a *little* bit more.\n\n\n" }, { "alpha_fraction": 0.7348484992980957, "alphanum_fraction": 0.7365846037864685, "avg_line_length": 48.87401580810547, "blob_id": "ca0e0706f79c62fd724fa8152c208fb80592afca", "content_id": "f66b710e2d0516a9d1fc7249e4bf78c56efc6e06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6336, "license_type": "no_license", "max_line_length": 88, "num_lines": 127, "path": "/ex45.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 45: Is-A, Has-A, Objects, and Classes\n**********************************************\n\nAn important concept that you have to understand is the difference between a\n``Class`` and an ``Object``. The problem is, there is no real \"difference\" between a\nclass and an object. They are actually the same thing at different points in\ntime. I will demonstrate by a Zen koan:\n\n``What is the difference between a Fish and a Salmon?``\n\nDid that question sort of confuse you? Really sit down and think about it for a\nminute. I mean, a Fish and a Salmon are different but, wait, they are the same\nthing right? A Salmon is a *kind* of Fish, so I mean it's not different. But\nat the same time, becase a Salmon is a particular *type* of Fish and so it's actually\ndifferent from all other Fish. That's what makes it a Salmon and not a\nHalibut. So a Salmon and a Fish are the same but different. Weird.\n\nThis question is confusing because most people do not think about real things\nthis way, but they intuitively understand them. 
You do not need to think about\nthe difference between a Fish and a Salmon because you *know* how they are\nrelated. You know a Salmon is a *kind* of Fish and that there are other kinds of\nFish without having to understand that.\n\nLet's take it one step further, let's say you have a bucket full of 3 Salmon\nand because you are a nice person, you have decided to name them Frank, Joe, and\nMary. Now, think about this question:\n\n``What is the difference between Mary and a Salmon?``\n\nAgain this is a weird question, but it's a bit easier than the Fish vs.\nSalmon question. You know that Mary is a Salmon, and so she's not really\ndifferent. She's just a specific \"instance\" of a Salmon. Joe and Frank are also\ninstances of Salmon. But, what do I mean when I say instance? I mean they were\ncreated from some other Salmon and now represent a real thing that has Salmon-like\nattributes.\n\nNow for the mind bending idea: Fish is a ``Class``, and Salmon is a ``Class``, and\nMary is an ``Object``. Think about that for a second. Alright let's break it \ndown real slow and see if you get it.\n\nA Fish is a ``Class``, meaning it's not a *real* thing, but rather a word we attach\nto instances of things with similar attributes. Got fins? Got gills? Lives in\nwater? Alright it's probably a Fish.\n\nSomeone with a Ph.D. then comes along and says, \"No my young friend, *this* Fish\nis actually *Salmo salar*, affectionately known as a Salmon.\" This professor\nhas just clarified the Fish further and made a new ``Class`` called \"Salmon\" that\nhas more specific attributes. Longer nose, reddish flesh, big, lives in the ocean or\nfresh water, tasty? 
Ok, probably a Salmon.\n\nFinally, a cook comes along and tells the Ph.D., \"No, you see this Salmon right\nhere, I'll call her Mary and I'm going to make a tasty fillet out of her with\na nice sauce.\" Now you have this *instance* of a Salmon (which also is an\ninstance of a Fish) named Mary turned into something real that is filling your\nbelly. It has become an ``Object``.\n\nThere you have it: Mary is a kind of Salmon that is a kind of Fish. ``Object`` is\na ``Class`` is a ``Class``.\n\nHow This Looks In Code\n----------------------\n\nThis is a weird concept, but to be very honest you only have to worry about it\nwhen you make new classes, and when you use a class. I will show you two\ntricks to help you figure out whether something is a ``Class`` or\n``Object``.\n\nFirst, you need to learn two catch phrases \"is-a\" and \"has-a\". You use the\nphrase is-a when you talk about objects and classes being related to each other\nby a class relationship. You use has-a when you talk about objects and classes\nthat are related only because they *reference* each other.\n\nNow, go through this piece of code and replace each ``##??`` comment with a\nreplacement comment that says whether the next line represents an ``is-a`` or a\n``has-a`` relationship, and what that relationship is. In the beginning of the\ncode, I've laid out a few examples, so you just have to write the remaining\nones.\n\nRemember, is-a is the relationship between Fish and Salmon, while has-a is the\nrelationship between Salmon and Gills.\n\n\n.. literalinclude:: ex/ex45.py\n :linenos:\n\n\nAbout class Name(object)\n========================\n\nRemember how I was yelling at you to always use ``class Name(object)`` and I couldn't\ntell you why? Now I can tell you, because you just learned about the difference between\na ``class`` and an ``object``. 
I couldn't tell you until now because you would have\njust been confused and couldn't learn to use the technology.\n\nWhat happened is Python's original rendition of ``class`` was broken in many serious\nways. By the time they admitted the fault it was too late, and they had to support \nit. In order to fix the problem, they needed some \"new class\" style so that the\n\"old classes\" would keep working but you could use the new more correct version.\n\nThis is where \"class is-a object\" comes in. They decided that they would use the\nword \"object\", lowercased, to be the \"class\" that you inherit from to make a class.\nConfusing right? A class inherits from the class named object to make a class but\nit's not an object really it's a class, but do not forget to inherit from object.\n\nExactly. The choice of one single word meant that I couldn't teach you about this\nuntil now. Now you can try to understand the concept of a class that is an\nobject if you like.\n\nHowever, I would suggest you do not. Just completely ignore the idea of old style\nvs. new style classes and assume that Python always requires (object) when you\nmake a class. Save your brain power for something important.\n\n\nExtra Credit\n============\n\n\n1. Research why Python added this strange ``object`` class, and what that means.\n2. Is it possible to use a ``Class`` like it's an ``Object``?\n3. Fill out the animals, fish, and people in this exercise with functions that make\n them do things. See what happens when functions are in a \"base class\" like Animal\n vs. in say Dog.\n4. Find other people's code and work out all the is-a and has-a relationships.\n5. Make some new relationships that are lists and dicts so you can also have \"has-many\"\n relationships.\n6. Do you think there's a such thing as a \"is-many\" relationship? 
Read about \"multiple\n inheritance\", then avoid it if you can.\n\n\n" }, { "alpha_fraction": 0.6916058659553528, "alphanum_fraction": 0.6989051103591919, "avg_line_length": 32.181819915771484, "blob_id": "0d4556ac49f71e4742368a1f45eeb61c42baba9a", "content_id": "82abc595c2f391c10c5f030fe775d70bc50227e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 80, "num_lines": 33, "path": "/ex7.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 7: More Printing\n*************************\n\nNow we are going to do a bunch of exercises where you just type code in\nand make it run. I won't be explaining much since it is just more of\nthe same. The purpose is to build up your chops. See you in a few \nexercises, and *do not skip!* Do not *paste!*\n\n.. literalinclude:: ex/ex7.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex7.txt\n :language: console\n\n\nExtra Credit\n============\n\nFor these next few exercises, you will have the exact same extra credit.\n\n1. Go back through and write a comment on what each line does.\n2. Read each one backwards or out loud to find your errors.\n3. From now on, when you make mistakes write down on a piece of paper what\n kind of mistake you made.\n4. When you go to the next exercise, look at the last mistakes you made and\n try not to make them in this new one.\n5. Remember that everyone makes mistakes. 
Programmers are like magicians who\n like everyone to think they are perfect and never wrong, but it's all an act.\n They make mistakes all the time.\n\n" }, { "alpha_fraction": 0.6132526397705078, "alphanum_fraction": 0.6253584027290344, "avg_line_length": 28.60377311706543, "blob_id": "7b339c0536eaa3c536f04ee795a6e6404bc3b075", "content_id": "ac931264f1963cf6aa0493136540a3ed65dc19d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3139, "license_type": "no_license", "max_line_length": 95, "num_lines": 106, "path": "/ex40.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 40: Dictionaries, Oh Lovely Dictionaries\n*************************************************\n\nNow I have to hurt you with another container you can use, because once\nyou learn this container a massive world of ultra-cool will be yours. It is\nthe most useful container ever: the dictionary.\n\nPython calls them \"dicts\", other languages call them, \"hashes\". I tend to use\nboth names, but it doesn't matter. What does matter is what they do when compared\nto lists. You see, a list lets you do this:\n\n.. code-block:: pycon\n\n >>> things = ['a', 'b', 'c', 'd']\n >>> print things[1]\n b\n >>> things[1] = 'z'\n >>> print things[1]\n z\n >>> print things\n ['a', 'z', 'c', 'd']\n >>> \n\nYou can use numbers to \"index\" into a list, meaning you can use numbers to \nfind out what's in lists. You should know this by now, but what a ``dict``\ndoes is let you use *anything*, not just numbers. Yes, a dict associates one thing\nto another, no matter what it is. Take a look:\n\n\n.. 
code-block:: pycon\n\n >>> stuff = {'name': 'Zed', 'age': 36, 'height': 6*12+2}\n >>> print stuff['name']\n Zed\n >>> print stuff['age']\n 36\n >>> print stuff['height']\n 74\n >>> stuff['city'] = \"San Francisco\"\n >>> print stuff['city']\n San Francisco\n >>> \n\nYou will see that instead of just numbers we're using strings to say what we\nwant from the ``stuff`` dictionary. We can also put new things into the\ndictionary with strings. It doesn't have to be strings though, we can also do\nthis:\n\n.. code-block:: pycon\n\n >>> stuff[1] = \"Wow\"\n >>> stuff[2] = \"Neato\"\n >>> print stuff[1]\n Wow\n >>> print stuff[2]\n Neato\n >>> print stuff\n {'city': 'San Francisco', 2: 'Neato', \n 'name': 'Zed', 1: 'Wow', 'age': 36, \n 'height': 74}\n >>>\n\nIn this one I just used numbers. I could use anything. Well almost but just pretend\nyou can use anything for now.\n\nOf course, a dictionary that you can only put things in is pretty stupid, so here's \nhow you delete things, with the ``del`` keyword:\n\n.. code-block:: pycon\n \n >>> del stuff['city']\n >>> del stuff[1]\n >>> del stuff[2]\n >>> stuff\n {'name': 'Zed', 'age': 36, 'height': 74}\n >>>\n\nWe'll now do an exercise that you *must* study very carefully. I want you to type this\nexercise in and try to understand what's going on. It is a very interesting exercise\nthat will hopefully make a big light turn on in your head very soon.\n\n\n.. literalinclude:: ex/ex40.py\n :linenos:\n\n\n.. warning:: Notice how I use ``themap`` instead of ``map``? That's because Python\n has a function called map, so if you try to use that you can have problems later.\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex40.txt\n :language: console\n\n\n\nExtra Credit\n============\n\n1. Go find the Python documentation for dictionaries (a.k.a. dicts, dict) and try to do\n even more things to them.\n2. Find out what you *can't* do with dictionaries. 
A big one is that they do not have order,\n so try playing with that.\n3. Try doing a ``for-loop`` over them, and then try the ``items()`` function in a ``for-loop``.\n\n" }, { "alpha_fraction": 0.6401028037071228, "alphanum_fraction": 0.6606683731079102, "avg_line_length": 15.8695650100708, "blob_id": "646b8773edfc797f32baba5093ced12e97b623e3", "content_id": "980942e8eb8095876012f95c6294ed725428fcdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 813, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/cn/ex24.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 24: ๆ›ดๅคš็ปƒไน \n**************************\n\nไฝ ็ฆป่ฟ™ๆœฌไนฆ็ฌฌไธ€้ƒจๅˆ†็š„็ป“ๅฐพๅทฒ็ปไธ่ฟœไบ†๏ผŒไฝ ๅบ”่ฏฅๅทฒ็ปๅ…ทๅค‡ไบ†่ถณๅคŸ็š„ Python ๅŸบ็ก€็Ÿฅ่ฏ†๏ผŒๅฏไปฅ\\\n็ปง็ปญๅญฆไน ไธ€ไบ›็ผ–็จ‹็š„ๅŽŸ็†ไบ†๏ผŒไฝ†ไฝ ๅบ”่ฏฅๅšๆ›ดๅคš็š„็ปƒไน ใ€‚่ฟ™ไธช็ปƒไน ็š„ๅ†…ๅฎนๆฏ”่พƒ้•ฟ๏ผŒๅฎƒ็š„็›ฎ็š„\\\nๆ˜ฏ้”ป็‚ผไฝ ็š„ๆฏ…ๅŠ›๏ผŒไธ‹ไธ€ไธชไน ้ข˜ไนŸๅทฎไธๅคšๆ˜ฏ่ฟ™ๆ ท็š„๏ผŒๅฅฝๅฅฝๅฎŒๆˆๅฎƒไปฌ๏ผŒๅšๅˆฐๅฎŒๅ…จๆญฃ็กฎ๏ผŒ่ฎฐๅพ—ไป”็ป†\\\nๆฃ€ๆŸฅใ€‚\n\n.. literalinclude:: ex/ex24.py\n :linenos:\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n.. literalinclude:: ex/ex24.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ่ฎฐๅพ—ไป”็ป†ๆฃ€ๆŸฅ็ป“ๆžœ๏ผŒไปŽๅŽๅพ€ๅ‰ๅ€’็€ๆฃ€ๆŸฅ๏ผŒๆŠŠไปฃ็ ๆœ—่ฏปๅ‡บๆฅ๏ผŒๅœจไธๆธ…ๆฅš็š„ไฝ็ฝฎๅŠ ไธŠๆณจ้‡Šใ€‚\n2. 
ๆ•…ๆ„ๆŠŠไปฃ็ ๆ”น้”™๏ผŒ่ฟ่กŒๅนถๆฃ€ๆŸฅไผšๅ‘็”Ÿไป€ไนˆๆ ท็š„้”™่ฏฏ๏ผŒๅนถไธ”็กฎ่ฎคไฝ ๆœ‰่ƒฝๅŠ›ๆ”นๆญฃ่ฟ™ไบ›้”™่ฏฏใ€‚\n\n" }, { "alpha_fraction": 0.726396918296814, "alphanum_fraction": 0.73483806848526, "avg_line_length": 44.983123779296875, "blob_id": "462b9e1d8a5677252dfe2220999fad3743b91dae", "content_id": "fbba81b71daaa893e9b4ad3bd6d98a478fd5f279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10899, "license_type": "no_license", "max_line_length": 159, "num_lines": 237, "path": "/ex52.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 52: The Start Of Your Web Game\n***************************************\n\nWe're coming to the end of the book, and in this exercise I'm going to\nreally challenge you. When you're done, you'll be a reasonably\ncompetent Python beginner. You'll still need to go through a few more\nbooks and write a couple more projects, but you'll have the skills to complete them.\nThe only thing in your way will be time, motivation, and resources.\n\nIn this exercise, we won't make a complete game, but instead we'll make an\n\"engine\" that can run the game from Exercise 42 in the browser. This will\ninvolve ``refactoring`` Exercise 42, mixing in the structure from Exercise 47,\nadding automated tests, and finally creating a web engine that can run the\ngames.\n\nThis exercise will be *huge*, and I predict you could spend anywhere from a week to months on it before moving on. It's best to attack it in little chunks and\ndo a bit a night, taking your time to make everything work before moving on.\n\n\nRefactoring The Exercise 42 Game\n================================\n\nYou've been altering the ``gothonweb`` project for two exercises and you'll\ndo it one more time in this exercise. The skill you're learning is called\n\"refactoring\", or as I like to call it, \"fixing stuff\". 
Refactoring is\na term programmers use to describe the process of taking old code, and changing\nit to have new features or just to clean it up. You've been doing this \nwithout even knowing it, as it's second nature to building software.\n\nWhat you'll do in this part is take the ideas from Exercise 47 of a testable\n\"map\" of Rooms, and the game from Exercise 42, and combine them together to\ncreate a new game structure. It will have the same content, just \"refactored\"\nto have a better structure.\n\nFirst step is to grab the code from ``ex47/game.py`` and copy it to \n``gothonweb/map.py`` and copy ``tests/ex47_tests.py`` file to ``tests/map_tests.py``\nand run ``nosetests`` again to make sure it keeps working.\n\n.. note:: \n\n From now on I won't show you the output of a test run, just assume that you should\n be doing it and it'll look like the above unless you have an error.\n\n\nOnce you have the code from Exercise 47 copied over, it's time to refactor it to\nhave the Exercise *42* map in it. I'm going to start off by laying down the basic\nstructure, and then you'll have an assignment to make the ``map.py`` file and the\n``map_tests.py`` file complete.\n\nFirst thing to do is lay out the basic structure of the map using the ``Room`` class\nas it is now:\n\n.. literalinclude:: ex/ex52/gothonweb/gothonweb/map.py\n :linenos:\n\nYou'll notice that there are a couple of problems with our ``Room`` class and this\nmap:\n\n1. We have to put the text that was in the ``if-else`` clauses that got\n printed *before* entering a room as part of each room. This means you can't\n shuffle the map around which would be nice. You'll be fixing that up\n in this exercise.\n2. There are parts in the original game where we ran code that determined\n things like the bomb's keypad code, or the right pod. In this game\n we just pick some defaults and go with it, but later you'll be\n given extra credit to make this work again.\n3. 
I've just made a ``generic_death`` ending for all of the bad decisions, which\n you'll have to finish for me. You'll need to go back through and add in all\n the original endings and make sure they work.\n4. I've got a new kind of transition labeled ``\"*\"`` that will be used for\n a \"catch-all\" action in the engine.\n\nOnce you've got that basically written out, here's the new automated test\n``tests/map_test.py`` that you should have to get yourself started:\n\n.. literalinclude:: ex/ex52/gothonweb/tests/map_tests.py\n :linenos:\n\nYour task in this part of the exercise is to complete the map, and make the\nautomated test completely validate the whole map. This includes fixing all\nthe ``generic_death`` objects to be real endings. Make sure this works\nreally well and that your test is as complete as possible because we'll be\nchanging this map later and you'll use the tests to make sure it keeps working.\n\n\nSessions And Tracking Users\n===========================\n\nAt a certain point in your web application you'll need to keep track of some information\nand associate it with the user's browser. The web (because of HTTP) is what we like\nto call \"stateless\", which means each request you make is independent of any other requests\nbeing made. If you request page A, put in some data, and click a link to page B, all\nthe data you sent to page A just disappears.\n\nThe solution to this is to create a little data store (usually in a database or on \nthe disk) that uses a number unique to each browser to keep track of what that\nbrowser was doing. In the little ``lpthw.web`` framework it's fairly easy, and there's\nan example showing how it's done:\n\n.. literalinclude:: ex/ex52/gothonweb/session_sample.py\n :linenos:\n\nTo make this work, you need to create a ``sessions/`` directory where the\napplication can put session storage. Do that, run this application and\ngo to ``/count``. Hit refresh and watch the counter go up. 
Close\nthe browser and it *forgets* who you are, which is what we want for the game.\nThere's a way to make the browser remember forever, but that makes testing\nand development harder. If you then go to ``/reset``, and back to ``/count``\nyou can see your counter reset because you've killed the session.\n\nTake the time to understand this code so you can see how the session starts\noff with the ``count`` equal to 0. Also try looking at the files in \n``sessions/`` to see if you can open them up. Here's a Python session where I\nopen up one and decode it:\n\n\n.. code-block:: pycon\n\n >>> import pickle\n >>> import base64\n >>> base64.b64decode(open(\"sessions/XXXXX\").read())\n \"(dp1\\nS'count'\\np2\\nI1\\nsS'ip'\\np3\\nV127.0.0.1\\np4\\nsS'session_id'\\np5\\nS'XXXX'\\np6\\ns.\"\n >>> \n >>> x = base64.b64decode(open(\"sessions/XXXXX\").read())\n >>> \n >>> pickle.loads(x)\n {'count': 1, 'ip': u'127.0.0.1', 'session_id': 'XXXXX'}\n\nThe sessions are really just dictionaries that get written to disk using ``pickle`` and \n``base64`` libraries. There are probably as many ways to store and manage sessions as\nthere are web frameworks, so it's not too important to know how these work. It does\nhelp if you need to debug the session or potentially clean them out.\n\n\nCreating An Engine\n==================\n\nYou should have your game map working and a good unit test for it. I now want\nto make a simple little game engine that will run the rooms, collect input from\nthe player, and keep track of where a play is in the game. We'll be using the\nsessions you just learned to make a simple game engine that will:\n\n1. Start a new game for new users.\n2. Present the room to the user.\n3. Take input from the user.\n4. Run their input through the game.\n5. Display the results and keep going until they die.\n\nTo do this, you're going to take the trusty ``bin/app.py`` you've been hacking on\nand create a fully working, session based, game engine. 
The catch is I'm\ngoing to make a very simple one with *basic HTML* files, and it'll be up to you to\ncomplete it. Here's the base engine:\n\n.. literalinclude:: ex/ex52/gothonweb/bin/app.py\n :linenos:\n\nThere are even more new things in this script, but amazingly it's an entire\nweb based game engine in a small file. The biggest \"hack\" in the script are\nthe lines that bring the sessions back, which is needed so that debug mode\nreloading works. Otherwise, each time you hit refresh the sessions will\ndisappear and the game won't work.\n\nBefore you run bin/app.py you need to change your PYTHONPATH environment variable.\nDon't know what that is? I know, it's kind of dumb you have to learn what this\nis to run even basic Python programs, but that's how Python people like things.\n\nIn your terminal, type:\n\n.. code-block:: console\n\n export PYTHONPATH=$PYTHONPATH:.\n\nOn Windows do:\n\n.. code-block:: console\n\n set PYTHONPATH=%PYTHONPATH%;.\n\nYou should only have to do it once per shell session, but if you get an import error,\nthen you probably need to do this or you did it wrong.\n\nYou should next delete ``templates/hello_form.html`` and ``templates/index.html`` and\ncreate the two templates mentioned in the above code. Here's a *very* simple \n``templates/show_room.html``:\n\n.. literalinclude:: ex/ex52/gothonweb/templates/show_room.html\n :linenos:\n\nThat is the template to show a room as you travel through the game. Next\nyou need one to tell someone they died in the case that they got to the end\nof the map on accident, which is ``templates/you_died.html``:\n\n.. literalinclude:: ex/ex52/gothonweb/templates/you_died.html\n :linenos:\n\nWith those in place, you should now be able to do the following:\n\n1. Get the test ``tests/app_tests.py`` working again so that you are testing\n the game. You won't be able to do much more than a few clicks in the \n game because of sessions, but you should be able to do some basics.\n2. 
Remove the ``sessions/*`` files and make sure you've started over.\n3. Run the ``python bin/app.py`` script and test out the game.\n\nYou should be able to refresh and fix the game like normal, and work with \nthe game HTML and engine until it does all the things you want it to do.\n\n\nYour Final Exam\n===============\n\nDo you feel like this was a huge amount of information thrown at you all at once?\nGood, I want you to have something to tinker with while you build your skills.\nTo complete this exercise, I'm going to give you a final set of exercises for\nyou to complete on your own. You'll notice that what you've written so far\nisn't very well built, it is just a first version of the code. Your task now\nis to make the game more complete by doing these things:\n\n1. Fix all the bugs I mention in the code, and any that I didn't mention.\n If you find new bugs, let me know.\n2. Improve all of the automated tests so that you test more of the application\n and get to a point where you use a test rather than your browser to check\n the application while you work.\n3. Make the HTML look better.\n4. Research logins and create a signup system for the application, so people\n can have logins and high scores.\n5. Complete the game map, making it as large and feature complete as possible.\n6. Give people a \"help\" system that lets them ask what they can do at each\n room in the game.\n7. Add any other features you can think of to the game.\n8. Create several \"maps\" and let people choose a game they want\n to run. Your ``bin/app.py`` engine should be able to run any map of\n rooms you give it, so you can support multiple games.\n9. Finally, use what you learned in Exercises 48 and 49 to create a better\n input processor. 
You have most of the code necessary, you just need to\n improve the grammar and hook it up to your input form and the ``GameEngine``.\n\nGood luck!\n\n" }, { "alpha_fraction": 0.672493577003479, "alphanum_fraction": 0.6884318590164185, "avg_line_length": 21.33333396911621, "blob_id": "bf37586f7fd120c79d7710de05a0d9918c8e774c", "content_id": "816a753f36c04d3f23a5178d4ae21fac350d2db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4357, "license_type": "no_license", "max_line_length": 57, "num_lines": 87, "path": "/cn/ex18.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 18: ๅ‘ฝๅใ€ๅ˜้‡ใ€ไปฃ็ ใ€ๅ‡ฝๆ•ฐ\n**********************************************\n\nๆ ‡้ข˜ๅŒ…ๅซ็š„ๅ†…ๅฎนๅคŸๅคš็š„ๅง๏ผŸๆŽฅไธ‹ๆฅๆˆ‘่ฆๆ•™ไฝ โ€œๅ‡ฝๆ•ฐ(function)โ€ไบ†๏ผๅ’šๅ’š้”ต๏ผ\\\n่ฏดๅˆฐๅ‡ฝๆ•ฐ๏ผŒไธไธ€ๆ ท็š„ไบบไผšๅฏนๅฎƒๆœ‰ไธไธ€ๆ ท็š„็†่งฃๅ’Œไฝฟ็”จๆ–นๆณ•๏ผŒไธ่ฟ‡ๆˆ‘ๅชไผšๆ•™ไฝ \\\n็Žฐๅœจ่ƒฝ็”จๅˆฐ็š„ๆœ€็ฎ€ๅ•็š„ไฝฟ็”จๆ–นๅผใ€‚\n\nๅ‡ฝๆ•ฐๅฏไปฅๅšไธ‰ๆ ทไบ‹ๆƒ…๏ผš\n\n1. ๅฎƒไปฌ็ป™ไปฃ็ ็‰‡ๆฎตๅ‘ฝๅ๏ผŒๅฐฑ่ทŸโ€œๅ˜้‡โ€็ป™ๅญ—็ฌฆไธฒๅ’Œๆ•ฐๅญ—ๅ‘ฝๅไธ€ๆ ทใ€‚\n2. ๅฎƒไปฌๅฏไปฅๆŽฅๅ—ๅ‚ๆ•ฐ๏ผŒๅฐฑ่ทŸไฝ ็š„่„šๆœฌๆŽฅๅ— ``argv`` ไธ€ๆ ทใ€‚\n3. ้€š่ฟ‡ไฝฟ็”จ #1 ๅ’Œ #2๏ผŒๅฎƒไปฌๅฏไปฅ่ฎฉไฝ ๅˆ›ๅปบโ€œๅพฎๅž‹่„šๆœฌโ€ๆˆ–่€…โ€œๅฐๅ‘ฝไปคโ€ใ€‚\n\nไฝ ๅฏไปฅไฝฟ็”จ ``def`` ๆ–ฐๅปบๅ‡ฝๆ•ฐใ€‚ๆˆ‘ๅฐ†่ฎฉไฝ ๅˆ›ๅปบๅ››ไธชไธๅŒ็š„ๅ‡ฝๆ•ฐ๏ผŒๅฎƒไปฌๅทฅไฝœ่ตทๆฅๅ’Œไฝ ็š„\\\n่„šๆœฌไธ€ๆ ทใ€‚็„ถๅŽๆˆ‘ไผšๆผ”็คบ็ป™ไฝ ๅ„ไธชๅ‡ฝๆ•ฐไน‹้—ด็š„ๅ…ณ็ณปใ€‚\n\n.. literalinclude:: ex/ex18.py\n :linenos:\n\n่ฎฉๆˆ‘ไปฌๆŠŠไฝ ไธ€ไธชๅ‡ฝๆ•ฐ ``print_two`` ่‚ข่งฃไธ€ไธ‹๏ผŒ่ฟ™ไธชๅ‡ฝๆ•ฐๅ’Œไฝ ๅ†™่„šๆœฌ็š„ๆ–นๅผๅทฎไธๅคš๏ผŒ\\\nๅ› ๆญคไฝ ็œ‹ไธŠๅŽปๅบ”่ฏฅไผš่ง‰็€ๆฏ”่พƒ็œผ็†Ÿ๏ผš\n\n1. ้ฆ–ๅ…ˆๆˆ‘ไปฌๅ‘Š่ฏ‰ Python ๅˆ›ๅปบไธ€ไธชๅ‡ฝๆ•ฐ๏ผŒๆˆ‘ไปฌไฝฟ็”จๅˆฐ็š„ๅ‘ฝไปคๆ˜ฏ ``def`` ๏ผŒไนŸๅฐฑๆ˜ฏ\\\n โ€œๅฎšไน‰(define)โ€็š„ๆ„ๆ€ใ€‚\n2. 
็ดงๆŽฅ็€ ``def`` ็š„ๆ˜ฏๅ‡ฝๆ•ฐ็š„ๅ็งฐใ€‚ๆœฌไพ‹ไธญๅฎƒ็š„ๅ็งฐๆ˜ฏ \"print_two\"๏ผŒไฝ†ๅๅญ—ๅฏไปฅ\\\n ้šไพฟๅ–๏ผŒๅฐฑๅซ \"peanuts\" ไนŸๆฒกๅ…ณ็ณปใ€‚ไฝ†ๆœ€ๅฅฝๅ‡ฝๆ•ฐ็š„ๅ็งฐ่ƒฝๅคŸไฝ“็Žฐๅ‡บๅ‡ฝๆ•ฐ็š„ๅŠŸ่ƒฝๆฅใ€‚\n3. ็„ถๅŽๆˆ‘ไปฌๅ‘Š่ฏ‰ๅ‡ฝๆ•ฐๆˆ‘ไปฌ้œ€่ฆ ``*args`` (asterisk args)๏ผŒ่ฟ™ๅ’Œ่„šๆœฌ็š„ ``argv`` \n ้žๅธธ็›ธไผผ๏ผŒๅ‚ๆ•ฐๅฟ…้กปๆ”พๅœจๅœ†ๆ‹ฌๅท ``()`` ไธญๆ‰่ƒฝๆญฃๅธธๅทฅไฝœใ€‚\n4. ๆŽฅ็€ๆˆ‘ไปฌ็”จๅ†’ๅท ``:`` ็ป“ๆŸๆœฌ่กŒ๏ผŒ็„ถๅŽๅผ€ๅง‹ไธ‹ไธ€่กŒ็ผฉ่ฟ›ใ€‚\n5. ๅ†’ๅทไปฅไธ‹๏ผŒไฝฟ็”จ 4 ไธช็ฉบๆ ผ็ผฉ่ฟ›็š„่กŒ้ƒฝๆ˜ฏๅฑžไบŽ ``print_two`` ่ฟ™ไธชๅ‡ฝๆ•ฐ็š„ๅ†…ๅฎนใ€‚\n ๅ…ถไธญ็ฌฌไธ€่กŒ็š„ไฝœ็”จๆ˜ฏๅฐ†ๅ‚ๆ•ฐ่งฃๅŒ…๏ผŒ่ฟ™ๅ’Œ่„šๆœฌๅ‚ๆ•ฐ่งฃๅŒ…็š„ๅŽŸ็†ๅทฎไธๅคšใ€‚\n6. ไธบไบ†ๆผ”็คบๅฎƒ็š„ๅทฅไฝœๅŽŸ็†๏ผŒๆˆ‘ไปฌๆŠŠ่งฃๅŒ…ๅŽ็š„ๆฏไธชๅ‚ๆ•ฐ้ƒฝๆ‰“ๅฐๅ‡บๆฅ๏ผŒ่ฟ™ๅ’Œๆˆ‘ไปฌๅœจไน‹ๅ‰่„šๆœฌ\\\n ็ปƒไน ไธญๆ‰€ไฝœ็š„็ฑปไผผใ€‚\n\nๅ‡ฝๆ•ฐ ``print_two`` ็š„้—ฎ้ข˜ๆ˜ฏ๏ผšๅฎƒๅนถไธๆ˜ฏๅˆ›ๅปบๅ‡ฝๆ•ฐๆœ€็ฎ€ๅ•็š„ๆ–นๆณ•ใ€‚ๅœจ Python ๅ‡ฝๆ•ฐไธญๆˆ‘ไปฌ\\\nๅฏไปฅ่ทณ่ฟ‡ๆ•ดไธชๅ‚ๆ•ฐ่งฃๅŒ…็š„่ฟ‡็จ‹๏ผŒ็›ดๆŽฅไฝฟ็”จ ``()`` ้‡Œ่พน็š„ๅ็งฐไฝœไธบๅ˜้‡ๅใ€‚่ฟ™ๅฐฑๆ˜ฏ\n``print_two_again`` ๅฎž็Žฐ็š„ๅŠŸ่ƒฝใ€‚\n\nๆŽฅไธ‹ๆฅ็š„ไพ‹ๅญๆ˜ฏ ``print_one`` ๏ผŒๅฎƒๅ‘ไฝ ๆผ”็คบไบ†ๅ‡ฝๆ•ฐๅฆ‚ไฝ•ๆŽฅๅ—ๅ•ไธชๅ‚ๆ•ฐใ€‚\n\nๆœ€ๅŽไธ€ไธชไพ‹ๅญๆ˜ฏ ``print_none`` ๏ผŒๅฎƒๅ‘ไฝ ๆผ”็คบไบ†ๅ‡ฝๆ•ฐๅฏไปฅไธๆŽฅๆ”ถไปปไฝ•ๅ‚ๆ•ฐใ€‚\n\n.. warning::\n\n ๅฆ‚ๆžœไฝ ไธๅคช่ƒฝ็œ‹ๆ‡‚ไธŠ้ข็š„ๅ†…ๅฎนไนŸๅˆซๆฐ”้ฆใ€‚ๅŽ้ขๆˆ‘ไปฌ่ฟ˜ๆœ‰ๆ›ดๅคš็š„็ปƒไน ๅ‘ไฝ ๅฑ•็คบ\\\n ๅฆ‚ไฝ•ๅˆ›ๅปบๅ’Œไฝฟ็”จๅ‡ฝๆ•ฐใ€‚็Žฐๅœจไฝ ๅช่ฆๆŠŠๅ‡ฝๆ•ฐ็†่งฃๆˆโ€œ่ฟทไฝ ่„šๆœฌโ€ๅฐฑๅฏไปฅไบ†ใ€‚\n\n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\n่ฟ่กŒไธŠ้ข็š„่„šๆœฌไผš็œ‹ๅˆฐๅฆ‚ไธ‹็ป“ๆžœ:\n\n.. 
literalinclude:: ex/ex18.txt\n\nไฝ ๅบ”่ฏฅๅทฒ็ป็œ‹ๅ‡บๅ‡ฝๆ•ฐๆ˜ฏๆ€Žๆ ทๅทฅไฝœ็š„ไบ†ใ€‚ๆณจๆ„ๅˆฐๅ‡ฝๆ•ฐ็š„็”จๆณ•ๅ’Œไฝ ไปฅๅ‰่ง่ฟ‡็š„ ``exists``\\ใ€\n``open``\\๏ผŒไปฅๅŠๅˆซ็š„โ€œๅ‘ฝไปคโ€ๆœ‰็‚น็ฑปไผผไบ†ๅง๏ผŸๅ…ถๅฎžๆˆ‘ๅชๆ˜ฏไธบไบ†่ฎฉไฝ ๅฎนๆ˜“็†่งฃๆ‰ๅซๅฎƒไปฌโ€œๅ‘ฝไปคโ€๏ผŒ\nๅฎƒไปฌ็š„ๆœฌ่ดจๅ…ถๅฎžๅฐฑๆ˜ฏๅ‡ฝๆ•ฐใ€‚ไนŸๅฐฑๆ˜ฏ่ฏด๏ผŒไฝ ไนŸๅฏไปฅๅœจ่‡ชๅทฑ็š„่„šๆœฌไธญๅˆ›ๅปบไฝ ่‡ชๅทฑ็š„โ€œๅ‘ฝไปคโ€ใ€‚\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\nไธบ่‡ชๅทฑๅ†™ไธ€ไธช\\ ``ๅ‡ฝๆ•ฐๆณจๆ„ไบ‹้กน``\\ไปฅไพ›ๅŽ็ปญๅ‚่€ƒใ€‚ไฝ ๅฏไปฅๅ†™ๅœจไธ€ไธช็ดขๅผ•ๅก็‰‡ไธŠ้šๆ—ถ้˜…่ฏป๏ผŒ\\\n็›ดๅˆฐไฝ ่ฎฐไฝๆ‰€ๆœ‰็š„่ฆ็‚นไธบๆญขใ€‚ๆณจๆ„ไบ‹้กนๅฆ‚ไธ‹๏ผš\n\n1. ๅ‡ฝๆ•ฐๅฎšไน‰ๆ˜ฏไปฅ ``def`` ๅผ€ๅง‹็š„ๅ—๏ผŸ\n2. ๅ‡ฝๆ•ฐๅ็งฐๆ˜ฏไปฅๅญ—็ฌฆๅ’Œไธ‹ๅˆ’็บฟ ``_`` ็ป„ๆˆ็š„ๅ—๏ผŸ\n3. ๅ‡ฝๆ•ฐๅ็งฐๆ˜ฏไธๆ˜ฏ็ดง่ทŸ็€ๆ‹ฌๅท ``(`` ๏ผŸ\n4. ๆ‹ฌๅท้‡Œๆ˜ฏๅฆๅŒ…ๅซๅ‚ๆ•ฐ๏ผŸๅคšไธชๅ‚ๆ•ฐๆ˜ฏๅฆไปฅ้€—ๅท้š”ๅผ€๏ผŸ\n5. ๅ‚ๆ•ฐๅ็งฐๆ˜ฏๅฆๆœ‰้‡ๅค๏ผŸ๏ผˆไธ่ƒฝไฝฟ็”จ้‡ๅค็š„ๅ‚ๆ•ฐๅ๏ผ‰\n6. ็ดง่ทŸ็€ๅ‚ๆ•ฐ็š„ๆ˜ฏไธๆ˜ฏๆ‹ฌๅทๅ’Œๅ†’ๅท ``):`` ๏ผŸ\n7. ็ดง่ทŸ็€ๅ‡ฝๆ•ฐๅฎšไน‰็š„ไปฃ็ ๆ˜ฏๅฆไฝฟ็”จไบ† 4 ไธช็ฉบๆ ผ็š„็ผฉ่ฟ› (``indent``)๏ผŸ\n8. ๅ‡ฝๆ•ฐ็ป“ๆŸ็š„ไฝ็ฝฎๆ˜ฏๅฆๅ–ๆถˆไบ†็ผฉ่ฟ› (\"dedent\")๏ผŸ\n\nๅฝ“ไฝ ่ฟ่กŒ๏ผˆๆˆ–่€…่ฏดโ€œไฝฟ็”จ useโ€ๆˆ–่€…โ€œ่ฐƒ็”จ callโ€๏ผ‰ไธ€ไธชๅ‡ฝๆ•ฐๆ—ถ๏ผŒ่ฎฐๅพ—ๆฃ€ๆŸฅไธ‹้ข็š„่ฆ็‚น๏ผš\n\n1. ่ฐƒ่ฟๅ‡ฝๆ•ฐๆ—ถๆ˜ฏๅฆไฝฟ็”จไบ†ๅ‡ฝๆ•ฐ็š„ๅ็งฐ๏ผŸ\n2. ๅ‡ฝๆ•ฐๅ็งฐๆ˜ฏๅฆ็ดง่ทŸ็€ ``(`` ๏ผŸ\n3. ๆ‹ฌๅทๅŽๆœ‰ๆ— ๅ‚ๆ•ฐ๏ผŸๅคšไธชๅ‚ๆ•ฐๆ˜ฏๅฆไปฅ้€—ๅท้š”ๅผ€๏ผŸ\n4. 
ๅ‡ฝๆ•ฐๆ˜ฏๅฆไปฅ ``)`` ็ป“ๅฐพ๏ผŸ\n\nๆŒ‰็…ง่ฟ™ไธคไปฝๆฃ€ๆŸฅ่กจ้‡Œ็š„ๅ†…ๅฎนๆฃ€ๆŸฅไฝ ็š„็ปƒไน ๏ผŒ็›ดๅˆฐไฝ ไธ้œ€่ฆๆฃ€ๆŸฅ่กจไธบๆญขใ€‚\n\nๆœ€ๅŽ๏ผŒๅฐ†ไธ‹้ข่ฟ™ๅฅ่ฏ้˜…่ฏปๅ‡ ้๏ผš\n\nโ€œโ€˜่ฟ่กŒๅ‡ฝๆ•ฐ(run)โ€™ใ€โ€˜่ฐƒ็”จๅ‡ฝๆ•ฐ(call)โ€™ใ€ๅ’Œ โ€˜ไฝฟ็”จๅ‡ฝๆ•ฐ(use)โ€™ๆ˜ฏๅŒไธ€ไธชๆ„ๆ€โ€\n\n\n" }, { "alpha_fraction": 0.6906585097312927, "alphanum_fraction": 0.7075038552284241, "avg_line_length": 18.727272033691406, "blob_id": "4b8a206a7f56bc22c20b8abc01a7926a77c0ebad", "content_id": "0b56a8acee9a2b123424df87f824da03dd7f3253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 53, "num_lines": 33, "path": "/cn/ex19.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "ไน ้ข˜ 19: ๅ‡ฝๆ•ฐๅ’Œๅ˜้‡\n************************************\n\nๅ‡ฝๆ•ฐ่ฟ™ไธชๆฆ‚ๅฟตไนŸ่ฎธๆ‰ฟ่ฝฝไบ†ๅคชๅคš็š„ไฟกๆฏ้‡๏ผŒไธ่ฟ‡ๅˆซๆ‹…ๅฟƒใ€‚ๅช่ฆๅšๆŒๅš่ฟ™ไบ›็ปƒไน ๏ผŒๅฏน็…งไธŠไธช็ปƒไน \\\nไธญ็š„ๆฃ€ๆŸฅ็‚นๆฃ€ๆŸฅไธ€้่ฟ™ๆฌก็š„่”็ณป๏ผŒไฝ ๆœ€็ปˆไผšๆ˜Ž็™ฝ่ฟ™ไบ›ๅ†…ๅฎน็š„ใ€‚\n\nๆœ‰ไธ€ไธชไฝ ๅฏ่ƒฝๆฒกๆœ‰ๆณจๆ„ๅˆฐ็š„็ป†่Š‚๏ผŒๆˆ‘ไปฌ็Žฐๅœจๅผบ่ฐƒไธ€ไธ‹๏ผšๅ‡ฝๆ•ฐ้‡Œ่พน็š„ๅ˜้‡ๅ’Œ่„šๆœฌ้‡Œ่พน็š„ๅ˜้‡\\\nไน‹้—ดๆ˜ฏๆฒกๆœ‰่ฟžๆŽฅ็š„ใ€‚ไธ‹้ข็š„่ฟ™ไธช็ปƒไน ๅฏไปฅ่ฎฉไฝ ๅฏน่ฟ™ไธ€็‚นๆœ‰ๆ›ดๅคš็š„ๆ€่€ƒ๏ผš\n\n.. literalinclude:: ex/ex19.py\n :linenos:\n\n้€š่ฟ‡่ฟ™ไธช็ปƒไน ๏ผŒไฝ ็œ‹ๅˆฐๆˆ‘ไปฌ็ป™ๆˆ‘ไปฌ็š„ๅ‡ฝๆ•ฐ ``cheese_and_crackers`` ๅพˆๅคš็š„ๅ‚ๆ•ฐ๏ผŒ็„ถๅŽๅœจ\\\nๅ‡ฝๆ•ฐ้‡ŒๆŠŠๅฎƒไปฌๆ‰“ๅฐๅ‡บๆฅใ€‚ๆˆ‘ไปฌๅฏไปฅๅœจๅ‡ฝๆ•ฐ้‡Œ็”จๅ˜้‡ๅ๏ผŒๆˆ‘ไปฌๅฏไปฅๅœจๅ‡ฝๆ•ฐ้‡Œๅš่ฟ็ฎ—๏ผŒๆˆ‘ไปฌ็”š่‡ณ\\\nๅฏไปฅๅฐ†ๅ˜้‡ๅ’Œ่ฟ็ฎ—็ป“ๅˆ่ตทๆฅใ€‚\n\nไปŽไธ€ๆ–น้ขๆฅ่ฏด๏ผŒๅ‡ฝๆ•ฐ็š„ๅ‚ๆ•ฐๅ’Œๆˆ‘ไปฌ็š„็”Ÿๆˆๅ˜้‡ๆ—ถ็”จ็š„ ``=`` ่ต‹ๅ€ผ็ฌฆ็ฑปไผผใ€‚ไบ‹ๅฎžไธŠ๏ผŒๅฆ‚ๆžœ\\\nไธ€ไธช็‰ฉไปถไฝ ๅฏไปฅ็”จ ``=`` ๅฐ†ๅ…ถๅ‘ฝๅ๏ผŒไฝ ้€šๅธธไนŸๅฏไปฅๅฐ†ๅ…ถไฝœไธบๅ‚ๆ•ฐไผ ้€’็ป™ไธ€ไธชๅ‡ฝๆ•ฐใ€‚ \n\nไฝ ๅบ”่ฏฅ็œ‹ๅˆฐ็š„็ป“ๆžœ\n===================\n\nไฝ ๅบ”่ฏฅ็ ”็ฉถไธ€ไธ‹่„šๆœฌ็š„่พ“ๅ‡บ๏ผŒๅ’Œไฝ ๆƒณ่ฑก็š„็ป“ๆžœๅฏนๆฏ”ไธ€ไธ‹็œ‹ๆœ‰ไป€ไนˆไธๅŒใ€‚\n\n.. 
literalinclude:: ex/ex19.txt\n\n\nๅŠ ๅˆ†ไน ้ข˜\n============\n\n1. ๅ€’็€ๅฐ†่„šๆœฌ่ฏปๅฎŒ๏ผŒๅœจๆฏไธ€่กŒไธŠ้ขๆทปๅŠ ไธ€่กŒๆณจ่งฃ๏ผŒ่ฏดๆ˜Ž่ฟ™่กŒ็š„ไฝœ็”จใ€‚\n2. ไปŽๆœ€ๅŽไธ€่กŒๅผ€ๅง‹๏ผŒๅ€’็€้˜…่ฏปๆฏไธ€่กŒ๏ผŒ่ฏปๅ‡บๆ‰€ๆœ‰็š„้‡่ฆๅญ—็ฌฆๆฅใ€‚\n3. ่‡ชๅทฑ็ผ–่‡ณๅฐ‘ไธ€ไธชๅ‡ฝๆ•ฐๅ‡บๆฅ๏ผŒ็„ถๅŽ็”จ10็งๆ–นๆณ•่ฟ่กŒ่ฟ™ไธชๅ‡ฝๆ•ฐใ€‚\n\n\n" }, { "alpha_fraction": 0.7009658217430115, "alphanum_fraction": 0.7072808146476746, "avg_line_length": 40.400001525878906, "blob_id": "6779a17b50b8894f78673da34dd59d1cc5caacc3", "content_id": "170b3120c7504023de33470dddb884e7c049088d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2692, "license_type": "no_license", "max_line_length": 82, "num_lines": 65, "path": "/ex32.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 32: Loops And Lists\n****************************\n\nYou should now be able to do some programs that are much more interesting.\nIf you have been keeping up, you should realize that now you can combine\nall the other things you have learned with ``if-statements`` and boolean\nexpressions to make your programs do smart things.\n\nHowever, programs also need to do repetitive things very quickly. We are going\nto use a ``for-loop`` in this exercise to build and print various lists. When\nyou do the exercise, you will start to figure out what they are. I won't tell you\nright now. You have to figure it out.\n\nBefore you can use a for-loop, you need a way to *store* the results of loops\nsomewhere. The best way to do this is with a ``list``. A list is exactly\nwhat its name says, a container of things that are organized in order.\nIt's not complicated; you just have to learn a new syntax. First, there's how\nyou make a list:\n\n.. 
code-block:: python\n \n hairs = ['brown', 'blond', 'red']\n eyes = ['brown', 'blue', 'green']\n weights = [1, 2, 3, 4]\n\nWhat you do is start the list with the ``[`` (left-bracket) which \"opens\" the\nlist. Then you put each item you want in the list separated by commas, just\nlike when you did function arguments. Lastly you end the list with a ``]``\n(right-bracket) to indicate that it's over. Python then takes this list and\nall its contents, and assigns them to the variable.\n\n.. warning::\n\n This is where things get tricky for people who can't program. Your\n brain has been taught that the world is flat. Remember in the last\n exercise where you put ``if-statements`` inside ``if-statements``? That\n probably made your brain hurt because most people do not ponder how to\n \"nest\" things inside things. In programming this is all over the place.\n You will find functions that call other functions that have ``if-statements``\n that have lists with lists inside lists. If you see a structure like this\n that you can't figure out, take out pencil and paper and break it down\n manually bit by bit until you understand it.\n\nWe now will build some lists using some loops and print them out:\n\n.. literalinclude:: ex/ex32.py\n :linenos:\n\n\nWhat You Should See\n===================\n\n.. literalinclude:: ex/ex32.txt\n :language: console\n\n\nExtra Credit\n============\n\n1. Take a look at how you used ``range``. Look up the ``range`` function to \n understand it.\n2. Could you have avoided that ``for-loop`` entirely on line 22 and just assigned\n ``range(0,6)`` directly to ``elements``?\n3. Find the Python documentation on lists and read about them. 
What other\n operations can you do to lists besides ``append``?\n\n" }, { "alpha_fraction": 0.7015544176101685, "alphanum_fraction": 0.7121761441230774, "avg_line_length": 37.20792007446289, "blob_id": "c5f49f7d42db729fa377cd0d8a9595812da6be19", "content_id": "40aa165193b104ee0f40bf6cf0bbc1c9e5a86b47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3860, "license_type": "no_license", "max_line_length": 79, "num_lines": 101, "path": "/ex13.rst", "repo_name": "gastlygem/lpthw-cn", "src_encoding": "UTF-8", "text": "Exercise 13: Parameters, Unpacking, Variables\n*********************************************\n\nIn this exercise we will cover one more input method you can use to pass\nvariables to a script (script being another name for your ``.py`` files). You\nknow how you type ``python ex13.py`` to run the ``ex13.py`` file? Well the\n``ex13.py`` part of the command is called an \"argument\". What we'll do now is\nwrite a script that also accepts arguments.\n\nType this program and I'll explain it in detail:\n\n.. literalinclude:: ex/ex13.py\n :linenos:\n\nOn line ``1`` we have what's called an \"import\". This is how you add\nfeatures to your script from the Python feature set. Rather than\ngive you all the features at once, Python asks you to say what \nyou plan to use. This keeps your programs small, but it also acts\nas documentation for other programmers who read your code later.\n\nThe ``argv`` is the \"argument variable\", a very standard name in programming,\nthat you will find used in many other languages. This variable *holds* the\narguments you pass to your Python script when you run it. In the exercises you\nwill get to play with this more and see what happens.\n\nLine 3 \"unpacks\" ``argv`` so that, rather than holding all the arguments, it\ngets assigned to four variables you can work with: ``script``, ``first``,\n``second``, and ``third``. 
This may look strange, but \"unpack\" is probably the\nbest word to describe what it does. It just says, \"Take whatever is in argv,\nunpack it, and assign it to all of these variables on the left in order.\"\n\nAfter that we just print them out like normal.\n\nHold Up! Features Have Another Name\n===================================\n\nI call them \"features\" here (these little things you ``import`` to make\nyour Python program do more) but nobody else calls them features. I just \nused that name because I needed to trick you into learning what they\nare without jargon. Before you can continue, you need to learn\ntheir real name: ``modules``.\n\nFrom now on we will be calling these \"features\" that we ``import`` *modules*.\nI'll say things like, \"You want to import the ``sys`` module.\" They are\nalso called \"libraries\" by other programmers, but let's just stick with\nmodules.\n\n\nWhat You Should See\n===================\n\nRun the program like this (and you *must* pass *three* command line arguments):\n\n.. code-block:: console\n\n python ex13.py first 2nd 3rd\n\nThis is what you should see when you do a few different runs\nwith different arguments:\n\n.. literalinclude:: ex/ex13.txt\n :language: console\n\nYou can actually replace \"first\", \"2nd\", and \"3rd\" with any three things.\nYou do not have to give these parameters either, you can give any 3 strings\nyou want:\n\n.. code-block:: console\n\n python ex13.py stuff I like\n python ex13.py anything 6 7\n\n\nIf you do not run it correctly, then you will get an error like this:\n\n.. code-block:: console\n\n python ex13.py first 2nd\n Traceback (most recent call last):\n File \"ex/ex13.py\", line 3, in <module>\n script, first, second, third = argv\n ValueError: need more than 3 values to unpack\n\nThis happens when you do not put enough arguments on the command when you\nrun it (in this case just ``first 2nd``). 
Notice when I run it I give it\n``first 2nd``, which caused it to give an error about \"need more than 3\nvalues to unpack\" telling you that you didn't give it enough parameters.\n\n\n\nExtra Credit\n============\n\n1. Try giving fewer than three arguments to your script. See that error \n you get? See if you can explain it.\n2. Write a script that has fewer arguments and one that has more. Make\n sure you give the unpacked variables good names.\n3. Combine ``raw_input`` with ``argv`` to make a script that gets more\n input from a user.\n4. Remember that modules give you features. Modules. Modules. Remember this\n because we'll need it later.\n\n" } ]
93
sudhir-j-sapkal/python-basic-examples
https://github.com/sudhir-j-sapkal/python-basic-examples
0385aaa2609a43c29b569973500f0fca4da7f503
98814de188cb92b13387111d9a673e529d508747
2ed60bdb938297b466964e2fd1c9e1897f51d9f1
refs/heads/master
2022-09-29T19:01:29.926415
2022-09-22T13:09:47
2022-09-22T13:09:47
164,411,646
8
3
null
null
null
null
null
[ { "alpha_fraction": 0.708185076713562, "alphanum_fraction": 0.7295373678207397, "avg_line_length": 22.41666603088379, "blob_id": "6857c05dca7fef528561a61802172d81bf54d3f4", "content_id": "87116a742e6c2f43fa1c24efbef71088a76a3dd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 83, "num_lines": 12, "path": "/13MysqlWithPython/05ConnectToDB.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "import mysql.connector\n#Create Connection to DB\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"root\",\n database=\"mypython_test_db\"\n)\n\n\nmycursor = mydb.cursor()\nmycursor.execute(\"CREATE TABLE employee (name VARCHAR(255), address VARCHAR(255))\")\n" }, { "alpha_fraction": 0.5941422581672668, "alphanum_fraction": 0.5941422581672668, "avg_line_length": 17.384614944458008, "blob_id": "927550d0ae73d7f4c1f4eff93337fc12bd502e82", "content_id": "e9fe1f2a7725d1a7ee4b593d0cc7a568e3db0ff6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/14Flask/variable_rule.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "from flask import Flask\napp = Flask(__name__)\n\[email protected]('/')\ndef initMyApp():\n return \"This is Home\"\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n return 'Hello %s!' 
% name\n\nif __name__ == '__main__':\n app.run(debug = True)\n" }, { "alpha_fraction": 0.598901093006134, "alphanum_fraction": 0.6950549483299255, "avg_line_length": 19.22222137451172, "blob_id": "d95cd997b27dab12e7f52c2510688b23c95a5f4a", "content_id": "6728b7fcf8eb97948278453136c7e53816243522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 78, "num_lines": 18, "path": "/01PythonObjectDataStructure/03Float.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Normal Float varibles\nmy_float_1 = 1.10\nmy_float_2 = 1.0\nmy_float_3 = -35.59\n\nprint(type(my_float_1))\nprint(type(my_float_2))\nprint(type(my_float_3))\n\n#Float can also be scientific numbers with an \"e\" to indicate the power of 10.\n\nmy_float_4 = 35e3\nmy_float_5 = 12E4\nmy_float_6 = -87.7e100\n\nprint(type(my_float_4))\nprint(type(my_float_5))\nprint(type(my_float_6))\n" }, { "alpha_fraction": 0.5734015107154846, "alphanum_fraction": 0.5872122645378113, "avg_line_length": 38.099998474121094, "blob_id": "763807b01b1e8e97365a25285b593d256ca42609", "content_id": "7ef6803adaf548240ed2bb1ab7a14ee18f5a5b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 199, "num_lines": 50, "path": "/10Statements/loops.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Python has two primitive loop commands:\n# 1.while loops\n# 2.for loops\n\n#While - With the while loop we can execute a set of statements as long as a condition is true.\n\ni = 1\nwhile i < 6:\n print(i)\n i += 1\nprint(\"===================================================\");\n#For - A for loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string)\nfruits = [\"apple\", \"banana\", \"cherry\"]\nfor 
fruit in fruits:\n print(fruit)\n\n#The range() Function - To loop through a set of code a specified number of times, we can use the range() function,\n#The range() function returns a sequence of numbers, starting from 0 by default, and increments by 1 (by default), and ends at a specified number.\nprint(\"====================================================\")\nfor i in range(6):\n print(i)\n\n#The range() function defaults to 0 as a starting value, however it is possible to specify the starting value by adding a parameter: range(2, 6), which means values from 2 to 6 (but not including 6):\nprint(\"====================================================\")\nfor i in range(2, 6):\n print(i)\n\n#The range() function defaults to increment the sequence by 1, however it is possible to specify the increment value by adding a third parameter: range(2, 30, 3):\nprint(\"=====================================================\")\nfor i in range(2, 30, 3):\n print(i)\n\n#Else in For Loop\n#The else keyword in a for loop specifies a block of code to be executed when the loop is finished:\nprint(\"=====================================================\")\nfor i in range(10):\n print(i)\nelse:\n print(\"Finally finished!\")\n\n#Nested Loops\n#A nested loop is a loop inside a loop.\n#The \"inner loop\" will be executed one time for each iteration of the \"outer loop\":\nprint(\"======================================================\");\nadj = [\"red\", \"big\", \"tasty\"]\nfruits = [\"apple\", \"banana\", \"cherry\"]\n\nfor x in adj:\n for y in fruits:\n print(x, y)\n" }, { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.6555555462837219, "avg_line_length": 21.5, "blob_id": "e9461828c880aaf75470ee6a7d98e28b9f6f935b", "content_id": "48410d6e7db372ff9fa24f01fc258800f19b1966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": 
"/02Strings/basic_string.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "first_name = \"Sudhir\"\nlast_name = \"Sapkal\"\n\nprint(\"My Name Is \"+first_name+\" \"+last_name)\n" }, { "alpha_fraction": 0.6867271065711975, "alphanum_fraction": 0.6908491253852844, "avg_line_length": 35.727272033691406, "blob_id": "ba5a56fa2742e7277da6830d536e616e56c85e49", "content_id": "6094e3bd4812951552cf25aeec705814f04098a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1213, "license_type": "no_license", "max_line_length": 181, "num_lines": 33, "path": "/06Sets/01Set.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#How to create set\nmy_set = {\"Virat\", \"Rohit\", \"KL Rahul\"}\nprint(my_set)\n\n#What if we give duplicates\nmy_set_with_duplicates = {\"Virat\", \"Rohit\", \"Virat\",\"Rohit\", \"Shikhar\",\"Rohit\"}\nprint(my_set_with_duplicates)\n\n#Loop through items\nmy_players = {\"Haydan\",\"Sachin\",\"Virat\",\"Michel Bevan\",\"Johnty Rohdes\", \"MS Dhoni\"}\nfor player in my_players:\n print(player)\n\n#To add one item to a set use the add() method.\nfruits = {\"apple\", \"banana\", \"cherry\"}\nfruits.add(\"orange\")\nprint(fruits)\n\n#To add more than one item to a set use the update() method.\nbikes = {\"Tiger\", \"Kawaski\", \"Triump\"}\nbikes.update([\"splendar\", \"m80\", \"luna\"])\nprint(bikes)\n\n#The set() Constructor\ntemp_set = set((\"1\", \"2\", \"3\")) # note the double round-brackets\nprint(temp_set)\n\n#Get the Length of a Set\n#To determine how many items a set have, use the len() method.\n#To remove an item in a set, use the remove(), or the discard() method.\n#If the item to remove does not exist, remove() will raise an error.\n#If the item to remove does not exist, discard() will NOT raise an error.\n#You can also use the pop(), method to remove an item, but this method will remove the last item. 
Remember that sets are unordered, so you will not know what item that gets removed.\n\n" }, { "alpha_fraction": 0.6216216087341309, "alphanum_fraction": 0.6216216087341309, "avg_line_length": 36, "blob_id": "f9dd69b650c9ccbc1ab7814fa3d46e34bc9909ff", "content_id": "4618e9f556e28badd75c225cfe966eed92f5e97f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 53, "num_lines": 2, "path": "/00Variable_OutputFunction/concate_output.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "my_name = \"Sudhir Sapkal\";\nprint(\"Hello \"+my_name+\",Welcome to Python World!!\");\n" }, { "alpha_fraction": 0.6686747074127197, "alphanum_fraction": 0.6987951993942261, "avg_line_length": 32.20000076293945, "blob_id": "b86b930eac167b701caac4b43ecd7a6eb8431e53", "content_id": "d08ada633fe956729e5a43ab967d655226482bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 96, "num_lines": 5, "path": "/10Statements/shorthandifelse.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Short Hand If\n#If you have only one statement to execute, you can put it on the same line as the if statement.\na = 200\nb = 33\nif a > b: print(\"a is greater than b\")\n" }, { "alpha_fraction": 0.6203703880310059, "alphanum_fraction": 0.6203703880310059, "avg_line_length": 14.428571701049805, "blob_id": "7525bf13ec94b1136ca7dc19468b18ec7a9e3c8b", "content_id": "587cd197b32fdb14e9a4a93682191298b0c17908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/14Flask/hello_flask.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "import 
flask\n\n\n# Create the application.\nAPP = flask.Flask(__name__)\n\[email protected]('/')\ndef helloWorld():\n return \"Hello World From Flask Web Framework\"\n\n\nif __name__ == '__main__':\n APP.debug=True\n APP.run()\n" }, { "alpha_fraction": 0.646766185760498, "alphanum_fraction": 0.6699833869934082, "avg_line_length": 23.1200008392334, "blob_id": "543009dd5a1fd3b33d05352372a095ea7685eecb", "content_id": "eea68f8f97ac733f2552ee03c1f104c15b4c10fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 101, "num_lines": 25, "path": "/12ObjectOrientedProgramming/05Encapsulation.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "# 1. Using OOP in Python, we can restrict access to methods and variables.\n# 2. This prevent data from direct modification which is called encapsulation.\n# 3. In Python, we denote private attribute using underscore as prefix i.e single \"_\" or double \"__\".\n\nclass Computer:\n\n def __init__(self):\n self.__maxprice = 900\n\n def sell(self):\n print(\"Selling Price: {}\".format(self.__maxprice))\n\n def setMaxPrice(self, price):\n self.__maxprice = price\n\nc = Computer()\nc.sell()\n\n# change the price\nc.__maxprice = 1000\nc.sell()\n\n# using setter function\nc.setMaxPrice(1500)\nc.sell()\n" }, { "alpha_fraction": 0.701195240020752, "alphanum_fraction": 0.7410358786582947, "avg_line_length": 20.514286041259766, "blob_id": "9d4e6fa4f1073ac0ff48f74603d6b74805670e3b", "content_id": "7be343c7d805ec40d53eb74c5fa02ed645fca621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 70, "num_lines": 35, "path": "/11MethodAndFunctions/myfunction.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Example \n#Creating a function\n\ndef my_function():\n 
print(\"Hello from a function\")\n\n#Calling a Function\nmy_function()\n\n#How to send parameters\ndef greetings(name):\n print(\"Good Morning, \"+name)\n\n#Calling a greetings function\ngreetings(\"Vinay\")\n\n#Returning function \ndef addition(num1,num2):\n return num1+num2;\n\n#Calling addition function\n#print(\"Addition =>\"+addition(10,20)) #Will give error\nresult = addition(10,20)\nprint(\"Addition of 10 and 20 is {}\".format(result))\n\n#Default Parameter Value\n#The following example shows how to use a default parameter value.\n#If we call the function without parameter, it uses the default value:\n\ndef substraction(num1=0,num2=0):\n print(num1-num2)\n\nsubstraction(30,20)\nsubstraction();\nsubstraction(50,50)\n" }, { "alpha_fraction": 0.6050000190734863, "alphanum_fraction": 0.6349999904632568, "avg_line_length": 24, "blob_id": "98a6cf108db7ad6235b386490a8b9a90d7f73a95", "content_id": "b1db80a70b2def71a76fba7b39ab0e7f7f141821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/00Variable_OutputFunction/advance_variable.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "x = 4 # x is of type int here\nx = \"Sally\" # x is now of type str here\n# This is called Dynamic Typing\n#C++ is Static Typing.\nprint(x)\ntemp = 20\nferhnite = 27.3+temp\nprint(f'Far => {ferhnite}');\n" }, { "alpha_fraction": 0.6880615949630737, "alphanum_fraction": 0.6880615949630737, "avg_line_length": 25.827587127685547, "blob_id": "41946b85a53e4f333dad9f48a81ee7a03bffe1b8", "content_id": "3a2d924bcc09f9d237df85f32edccf79864d8b13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 349, "num_lines": 29, "path": "/12ObjectOrientedProgramming/08Polymorphism.py", "repo_name": 
"sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "class Parrot:\n\n def fly(self):\n print(\"Parrot can fly\")\n \n def swim(self):\n print(\"Parrot can't swim\")\n\nclass Penguin:\n\n def fly(self):\n print(\"Penguin can't fly\")\n \n def swim(self):\n print(\"Penguin can swim\")\n\n# common interface\ndef flying_test(bird):\n bird.fly()\n\n#instantiate objects\nblu = Parrot()\npeggy = Penguin()\n\n# passing the object\nflying_test(blu)\nflying_test(peggy)\n\n'In the above program, we defined two classes Parrot and Penguin. Each of them have common method fly() method. However, their functions are different. To allow polymorphism, we created common interface i.e flying_test() function that can take any object. Then, we passed the objects blu and peggy in the flying_test() function, it ran effectively.'\n\n" }, { "alpha_fraction": 0.7146562933921814, "alphanum_fraction": 0.722438395023346, "avg_line_length": 24.66666603088379, "blob_id": "902189921c427c74a0351e0a1ac97fe6b85032ff", "content_id": "7c29f310d3c2d9758e2c7df7ad2483ea43d95230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/04List/01List.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "my_list = [\"1\", 2, \"hello\"]\nprint(my_list)\n\n#You access the list items by referring to the index number:\nfruit_list = [\"apple\", \"banana\", \"cherry\"]\nprint(fruit_list[1])\n\n#To change the value of a specific item, refer to the index number:\n\nfruit_list[1] = \"blackcurrant\"\nprint(fruit_list)\n\n#You can loop through the list items by using a for loop:\nfor x in fruit_list:\n print(x)\n\n#To determine if a specified item is present in a list use the in keyword:\n\nif \"apple\" in fruit_list:\n print(\"Yes, 'apple' is in the fruits list\")\n\n#To determine how many items a list have, use the len() 
method:\n\nprint(len(fruit_list))\n\n#To add an item to the end of the list, use the append() method:\n\nfruit_list.append(\"orange\")\nprint(fruit_list)\n\n#To add an item at the specified index, use the insert() method:\n\nfruit_list.insert(1, \"orange\")\nprint(fruit_list)\n\n#There are several methods to remove items from a list:\nfruit_list.remove(\"blackcurrant\")\nprint(fruit_list)\n\nfruit_list.pop()\nprint(fruit_list)\n\n# To delet item in list\ndel fruit_list[0]\nprint(fruit_list)\n\n#The clear() method empties the list:\nfruit_list.clear()\nprint(fruit_list)\n\n#The del keyword can also delete the list completely:\nfruit_list[\"23\",\"44\",\"66\"]\ndel fruit_list\n# print(fruit_list) If we try to execute this it will give error of undefine list\n\n#It is also possible to use the list() constructor to make a list\nfruit_list = list((\"apple\", \"banana\", \"cherry\")) # note the double round-brackets\nprint(fruit_list)\n\n#Python has a set of built-in methods that you can use on lists.\n\n\n" }, { "alpha_fraction": 0.6584673523902893, "alphanum_fraction": 0.6584673523902893, "avg_line_length": 28.36111068725586, "blob_id": "4f8a05bf11005ae58f938aa1f9a32263dc063590", "content_id": "30517afc72fcac8c7e66ec3958fa5fbec4e56f6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 280, "num_lines": 36, "path": "/12ObjectOrientedProgramming/07Inheritance.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "# parent class\nclass Bird:\n \n def __init__(self):\n print(\"Bird is ready\")\n\n def whoisThis(self):\n print(\"Bird\")\n\n def swim(self):\n print(\"Swim faster\")\n\n# child class\nclass Penguin(Bird):\n\n def __init__(self):\n # call super() function\n super().__init__()\n print(\"Penguin is ready\")\n\n def whoisThis(self):\n print(\"Penguin\")\n\n def run(self):\n print(\"Run faster\")\n\npeggy = 
Penguin()\npeggy.whoisThis()\npeggy.swim()\npeggy.run()\n\n'In the above program, we created two classes i.e. Bird (parent class) and Penguin (child class). \n\nThe child class inherits the functions of parent class. We can see this from swim() method. Again, the child class modified the behavior of parent class. We can see this from whoisThis() method. Furthermore, we extend the functions of parent class, by creating a new run() method.\n\nAdditionally, we use super() function before __init__() method. This is because we want to pull the content of __init__() method from the parent class into the child class.'\n" }, { "alpha_fraction": 0.6965429186820984, "alphanum_fraction": 0.7336747646331787, "avg_line_length": 47.8125, "blob_id": "fb3f1220dc9229eabe5c5f38e93c560abece4ca9", "content_id": "f3bee243c74add00942b760a0bd88472b4f02979", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 149, "num_lines": 16, "path": "/09ComparisonOperators/chaining_comparision_operator.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "print(1 < 2 < 3)\n#The above statement checks if 1 was less than 2 and if 2 was less than 3. \n#We could have written this using an and statement in Python:\nprint(1<2 and 2<3)\n\n#The and is used to make sure two checks have to be true in order for the total check to be true. Let's see another example:\nprint(1 < 3 > 2)\n\n#The above checks if 3 is larger than both of the other numbers, so you could use and to rewrite it as:\nprint(1<3 and 3>2)\n\n#It's important to note that Python is checking both instances of the comparisons. We can also use or to write comparisons in Python. For example:\nprint(1==2 or 2<3)\n\n#Note how it was true; this is because with the or operator, we only need one or the other to be true. 
Let's see one more example to drive this home:\nprint(1==1 or 100==1)\n" }, { "alpha_fraction": 0.5407407283782959, "alphanum_fraction": 0.5481481552124023, "avg_line_length": 15.875, "blob_id": "f8a69ab4f0c410c804c41e8a287a8f08359478af", "content_id": "f096df08d4bb5bf4859e9c9eaae0ae2a1e7d9b51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 31, "num_lines": 16, "path": "/03Operators/airthmatic.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "x = 5;\ny = 2;\naddi = x+y;\nsubs = x-y;\nmult = x*y;\ndiv = x/y;\nmod = x%y;\nexpo = x**y;\nfloordiv = x//y;\nprint(f'Add {addi}');\nprint(f'Sub {subs}');\nprint(f'Mult {mult}');\nprint(f'Div {div}');\nprint(f'Mod {mod}');\nprint(f'expo {expo}');\nprint(f'Floor Div {floordiv}');\n" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 20.14285659790039, "blob_id": "bcaf3f3ce5f42ba9c8d92d7cfb004af75588cb8b", "content_id": "93de3bdd1d8bef42e351f96bfbe33a0e42e1b158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/01PythonObjectDataStructure/02Int.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "normal_int = 10\nlarge_int = 46333666787711\nnegative_int = -25452233\n\nprint(type(normal_int))\nprint(type(large_int))\nprint(type(negativee_int))\n" }, { "alpha_fraction": 0.5642633438110352, "alphanum_fraction": 0.5642633438110352, "avg_line_length": 25.58333396911621, "blob_id": "fa4ef7fda39f7b46e265a00015d6cdd73bc27cbe", "content_id": "f9ad0ea2cfffa7f17f26f61899b9316d026f524a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 45, 
"num_lines": 12, "path": "/12ObjectOrientedProgramming/03BuiltIn_Class_Attributes.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n\n\nprint \"Person.__doc__:\", Person.__doc__\nprint \"Person.__name__:\", Person.__name__\nprint \"Person.__module__:\", Person.__module__\nprint \"Person.__bases__:\", Person.__bases__\nprint \"Person.__dict__:\", Person.__dict__\n" }, { "alpha_fraction": 0.6843971610069275, "alphanum_fraction": 0.6985815763473511, "avg_line_length": 30.33333396911621, "blob_id": "a6e73e0c55105a3695536845ea14e4b305981a87", "content_id": "64011ddb7c3fa1567e0f63ea1c20f17cd49cf395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 86, "num_lines": 9, "path": "/12ObjectOrientedProgramming/01OOP_Basic.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Creat Class Example\nclass MyClass:\n x = 05 #Member of MyClass\n y = 10 #Member of MyClass\n\n\nmy_class_object = MyClass()\nprint(my_class_object) #See what happens when you print object\nprint(my_class_object.x) #This should print x value which is member of MyClass\n" }, { "alpha_fraction": 0.5754339098930359, "alphanum_fraction": 0.5994659662246704, "avg_line_length": 28.920000076293945, "blob_id": "6dc4567f7f346a6808a129596fdf0b9804643714", "content_id": "b385934b966168511e4280af5e8dc746fa142daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "no_license", "max_line_length": 116, "num_lines": 25, "path": "/14Flask/employeeManagmentViaFlask.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "from flask import Flask, jsonify\n\n\n# Create the application.\nAPP = Flask(__name__)\n\[email protected]('/')\ndef 
myHome():\n\treturn \"My Home\";\n\[email protected]('/api/employees', methods=['GET'])\ndef getEmployees():\n employees = [{\"emp_id\":\"1\",\"name\":\"Sudhir\"},{\"emp_id\":\"11\",\"name\":\"Sudhir1\"},{\"emp_id\":\"133\",\"name\":\"Sudhir23\"}]\n return jsonify({\"message\":\"success\", \"data\":employees})\n\[email protected]('/api/employee/<emp_id>', methods=['GET'])\ndef getEmployee(emp_id):\n emps = [{\"emp_id\":\"1\",\"name\":\"Sudhir\"},{\"emp_id\":\"11\",\"name\":\"Sudhir1\"},{\"emp_id\":\"133\",\"name\":\"Sudhir23\"}]\n for employee in emps:\n return employee.name\n return jsonify({\"message\":\"success\",\"data\":employee}) \n\nif __name__ == '__main__':\n APP.debug=True\n APP.run()\n\n" }, { "alpha_fraction": 0.5446428656578064, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 19.363636016845703, "blob_id": "a3cca1ba3af1400e75212cce034da01ea8531a7e", "content_id": "738ca022733985cb1d6183627612f3c6369da972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/11MethodAndFunctions/pig_latin.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "def pig_latin(word):\n first_letter = word[0];\n\n if first_letter in 'aeiou':\n pig_word = word + 'ay';\n else:\n pig_word = word[1:] + first_letter + 'ey'\n\n return pig_word;\n\nprint(pig_latin('apple'));\n" }, { "alpha_fraction": 0.7588357329368591, "alphanum_fraction": 0.7889813184738159, "avg_line_length": 64.2203369140625, "blob_id": "9a004d8959833872f2c944d45c5f9ae897fbff61", "content_id": "c972bdea4f70d33fb23833846d3acd9957bad6c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7696, "license_type": "no_license", "max_line_length": 176, "num_lines": 118, "path": "/README.md", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": 
"# Python Basic Programs\n\n[![GitHub stars](https://img.shields.io/github/stars/sudhir-j-sapkal/python-basic-examples.svg)](https://github.com/sudhir-j-sapkal/python-basic-examples/stargazers)\n![Python](https://img.shields.io/badge/Python-3.6-brightgreen.svg)\n\nThis is collection of basic Python Programs.<br />\n\n **BEWARE**: This is a work in progress.\n * Code here may change and disappear without warning.\n\n#### How to use code\n\n1. First install Python 3+ on your machin according to steps for respective OS given [Here](https://www.python.org/downloads/).\n2. Fork this repository.\n3. Now you will have folder `python-basic-examples`.\n4. Navigate to this folder in terminal.\n5. To run the each individual program use `python {{file_name}}.py`.\n\n **Note :** This all programs are done by focusing on python3+ versions. You may face some issues while running this programes on python2.\n\n##### 00. Variables Declaration and Usage\n\n1. [Hello World In Python](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/00Variable_OutputFunction/hello_world.py)\n2. [Basic Variable Declaration](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/00Variable_OutputFunction/basic_variable.py)\n3. [Dynamic Typing and Printing output](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/00Variable_OutputFunction/advance_variable.py)\n4. [Concat Strings](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/00Variable_OutputFunction/concate_output.py)\n\n##### 01. Python Objects and Data Types\n\n1. [Numbers](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/01PythonObjectDataStructure/01numbers.py)\n2. [Integer](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/01PythonObjectDataStructure/02Int.py)\n3. [Float](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/01PythonObjectDataStructure/03Float.py)\n\n##### 02. Strings\n\n1. 
[Basic String Declaration and Usage](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/02Strings/basic_string.py)\n2. [Indexing and Slicing of String](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/02Strings/indexing_slicing.py)\n3. [Take Input from User](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/02Strings/input_from_user.py)\n4. [String Formating](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/02Strings/print_formating_with_string.py)\n5. [String Methods and Properties](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/02Strings/properties_and_method_of_string.py)\n\n##### 03. Operators\n\n1. [Arithmatic Operators](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/03Operators/airthmatic.py)\n2. [Special Operators](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/03Operators/special_operator.py)\n\n##### 04. List\n\n1. [List Declaration and Method usage](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/04List/01List.py)\n\n##### 05. Tuple\n\n1. [Tuple Declaration and Method usage](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/05Tuple/01Tuple.py)\n\n##### 06. Sets\n\n1. [Sets Declaration and methods usage](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/06Sets/01Set.py)\n\n##### 07. Dictionaries\n\n1. [Declaration and Method Usage](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/07Dictionaries/01Dictionaries.py)\n\n##### 08. File Handling\n\n1. [Basic File Handling](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/08Files/01File.py)\n\n##### 09. Comparision Operators\n\n1. [Chaining of Comparision Operators](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/09ComparisonOperators/chaining_comparision_operator.py)\n\n##### 10. Statments\n\n1. 
[If](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/10Statements/ifstatement.py)\n2. [elif](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/10Statements/elif.py)\n3. [else](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/10Statements/else.py)\n4. [shorthandifelse](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/10Statements/shorthandifelse.py)\n5. [Loops](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/10Statements/loops.py)\n\n##### 11. Function\n\n1. [Declaration and Defination, Passing parameters and Returning value](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/11MethodAndFunctions/myfunction.py)\n2. [Nested Functions and Variable Scopes](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/11MethodAndFunctions/nestedstatementAndScope.py)\n3. [Sending args and kwargs to functions](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/11MethodAndFunctions/argsANDkargs.py)\n\n##### Assignment 1: Pig Latin\n\n- [Solution](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/11MethodAndFunctions/pig_latin.py)\n\n##### 12. OOP Concepts\n\n1. [Basic OOP](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/01OOP_Basic.py)\n2. [Init Method](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/02init_method.py)\n3. [Built in Class Attributes](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/03BuiltIn_Class_Attributes.py)\n4. [Class and Object](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/04ClassAndObject.py)\n5. [Encapsulation](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/05Encapsulation.py)\n6. 
[Methods inside Class](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/06Methods.py)\n7. [Inheritance](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/07Inheritance.py)\n8. [Polymorphism](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/08Polymorphism.py)\n\n##### Assignement 2 : Employee Management Assignment using All Above concepts\n\n- [Solution](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/12ObjectOrientedProgramming/09EmplDeptManagement.py)\n\n##### 13. MySql with Python\n\n1. [Check MySql Connector working or not](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/01demo_mysql_test.py)\n2. [Create Connection](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/02CreateConnection.py)\n3. [Create Database](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/03CreateDatabase.py)\n4. [Check DB Exist or Not](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/04CheckDBExist.py)\n5. [Connect to DB and Create Table](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/05ConnectToDB.py)\n6. [Inset Record](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/06Insert.py)\n7. [Select Record](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/13MysqlWithPython/07Select.py)\n\n##### 14. Flask In Python\n\n1. [Hello World using Flask](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/14Flask/hello_flask.py)\n2. [Variable Rule](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/14Flask/variable_rule.py)\n3. 
[Url Binding](https://github.com/sudhir-j-sapkal/python-basic-examples/blob/master/14Flask/url_binding.py)\n" }, { "alpha_fraction": 0.6088888645172119, "alphanum_fraction": 0.6088888645172119, "avg_line_length": 18.565217971801758, "blob_id": "1a24ad293225c6629f9042b3b68d462466b5f6f1", "content_id": "f10e8540478d3a6d1f62ec7c7a97d32dd1514d59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 50, "num_lines": 23, "path": "/02Strings/properties_and_method_of_string.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#strip()\na = \" Hello, World! \"\nprint(a.strip()) # returns \"Hello, World!\"\n\n#len()\na = \"Hello, World!\"\nprint(len(a)) # returns length\n\n#lower()\na = \"Hello, World!\"\nprint(a.lower()) # makes all string lower case\n\n#upper()\na = \"Hello, World!\"\nprint(a.upper()) # makes all string upper case\n\n#replace()\na = \"Hay Hello, World\"\nprint(a.replace(\"H\", \"J\")) # replaces H with J\n\n#split()\na = \"Hello, World!\"\nprint(a.split(\",\")) # returns ['Hello', ' World!']\n" }, { "alpha_fraction": 0.7099999785423279, "alphanum_fraction": 0.7133333086967468, "avg_line_length": 26.272727966308594, "blob_id": "2bdabf40ec94709338d3493f198e88227f08aa58", "content_id": "3d2bdcf66c69d94a8dc4b4b3cfa3991fe05d4e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 117, "num_lines": 11, "path": "/08Files/01File.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "f = open(\"demo.txt\", \"r\")\n#print(f.read())\n\n#Read Only Parts of the File\n#By default the read() method returns the whole text, but you can also specify how many character you want to return:\n\n#print(f.read(5))\n\n#Read Lines\n#You can return one line by using the readline() 
method:\nprint(f.readline())\n" }, { "alpha_fraction": 0.6997690796852112, "alphanum_fraction": 0.7193995118141174, "avg_line_length": 26.0625, "blob_id": "94c8e9e16d63b91981abedf3a26a6ef4fc9ea250", "content_id": "0067c7469771d2b71e2ad550405c7c2f7e58be76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/07Dictionaries/01Dictionaries.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Create and print a dictionary:\nmy_car_dict = {\n \"brand\": \"Ford\",\n \"model\": \"Mustang\",\n \"year\": 1964\n}\nprint(my_car_dict)\n\n#Accessing Items\n#You can access the items of a dictionary by referring to its key name, inside square brackets:\ncar_model = my_car_dict[\"model\"]\nprint(car_model)\n\n\n#There is also a method called get() that will give you the same result:\nplayer_dict = {\n \"cap_id\": 101,\n \"name\": \"Virat\",\n \"highest_score\": 183\n}\nhighest_score = player_dict.get(\"highest_score\")\nprint(highest_score)\n\n#You can change the value of a specific item by referring to its key name:\nplayer_dict[\"cap_id\"] = 175\nprint(player_dict)\n\n#The dict() Constructor\ndict_with_constructor = dict(brand=\"Ford\", model=\"Mustang\", year=1964)\n# note that keywords are not string literals\n# note the use of equals rather than colon for the assignment\nprint(dict_with_constructor)\n" }, { "alpha_fraction": 0.5707316994667053, "alphanum_fraction": 0.5951219797134399, "avg_line_length": 17.636363983154297, "blob_id": "9618bbb100fb8bf766046b843f174eb4af858034", "content_id": "79fe374ddaafbdba1a9cddea3775387161f4a6c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/10Statements/else.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", 
"src_encoding": "UTF-8", "text": "a = 200\nb = 33\nif b > a:\n print(\"b is greater than a\")\n print(\"inside if\")\nelif a == b:\n print(\"a and b are equal\")\n print(\"inside el if\");\nelse:\n print(\"a is greater than b\")\n print('inside else');\n" }, { "alpha_fraction": 0.7100494503974915, "alphanum_fraction": 0.7166392207145691, "avg_line_length": 36.9375, "blob_id": "0431816c77df08e460ec47f5b92fd1d8c0808ef6", "content_id": "f40eedab6b3c406cfe952ef78ab91e883cf816da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 92, "num_lines": 16, "path": "/02Strings/print_formating_with_string.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "print(\"This is print formating, So I am gonna Insert String here {}\".format('INSERTED'));\n\n#This will be usefull when you wanted to do something like below \nprint(\"Hey {} Your college {} is Super Awesome !\".format('Sudhir',\"TKIET\"))\n\n#Here the replacement order can be passed in interpolation\nprint(\"My Name is {1} {0}\".format('Sudhir','Sapkal'))\n\n#We can also assign a variable names and replace the string accordingly\n\nprint(\"My Name is {first_name} {last_name}\".format(first_name='Sudhir', last_name='Sapkal'))\n\n#f formatted string litterls\nname = 'Sudhir'\nage = 25\nprint(f'{name} is {age} years old');\n" }, { "alpha_fraction": 0.6526255011558533, "alphanum_fraction": 0.6583958268165588, "avg_line_length": 24.115942001342773, "blob_id": "c5496307cf5cf8d2eb1189dbda9b1ebfff24eae4", "content_id": "8eac2d92495e601eb33851b2bf085fd3a69ad630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1733, "license_type": "no_license", "max_line_length": 172, "num_lines": 69, "path": "/13MysqlWithPython/07Select.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "import 
mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"root\",\n database=\"mypython_test_db\"\n)\n\nmycursor = mydb.cursor()\n\nmycursor.execute(\"SELECT * FROM employee\")\n\nmyresult = mycursor.fetchall()\nprint(\"**************All Records*******\")\nfor x in myresult:\n print(x)\n\n#Where Condition\n\nsql1 = \"SELECT * FROM employee WHERE address ='Pune'\"\n\nmycursor.execute(sql1)\n\nmyresult = mycursor.fetchall()\nprint(\"***********Records which satisfy where condition ***********\")\nfor x in myresult:\n print(x)\n\nprint(\"**********Record which have 'a' in name************\")\nsql2 = \"SELECT * FROM employee WHERE name LIKE '%ha%'\"\n\nmycursor.execute(sql2)\n\nmyresult = mycursor.fetchall()\n\nfor x in myresult:\n print(x)\n\n#Prevent SQL Injection\n\nprint(\"**********Prevent SQL Injection******************************\");\nsql3 = \"SELECT * FROM employee WHERE name = %s\"\nname = (\"Salman Khan\", )\n\nmycursor.execute(sql3, name)\n\nmyresult = mycursor.fetchall()\n\nfor x in myresult:\n print(x)\n\nprint(\"********Order By**************\");\nsql4 = \"SELECT * FROM employee ORDER BY name\"\n#To have DESC order give DESC after the name in above query\nmycursor.execute(sql4)\n\nmyresult = mycursor.fetchall()\n\nfor x in myresult:\n print(x)\n\nprint(\"**************Delete Record************************\")\nsql5 = \"DELETE FROM employee WHERE name = 'Amey Wagh'\"\nmycursor.execute(sql5)\nmydb.commit()\n#Notice the statement: mydb.commit(). It is required to make the changes, otherwise no changes are made to the table.\n#Notice the WHERE clause in the DELETE syntax: The WHERE clause specifies which record(s) that should be deleted. 
If you omit the WHERE clause, all records will be deleted!\nprint(mycursor.rowcount, \"record(s) deleted\")\n" }, { "alpha_fraction": 0.5929487347602844, "alphanum_fraction": 0.5993589758872986, "avg_line_length": 23, "blob_id": "bad5ba0fbc33463ddff71b6ea2b8a2a6ebcb2aba", "content_id": "0f6e906d6529a1ccb02aa930b4907278d831ac8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 44, "num_lines": 13, "path": "/12ObjectOrientedProgramming/02init_method.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n # setAge(age)\n\n def setAge(self,age):\n self.age = age\n\nperson_dagdu = Person(\"Dagdu\", 40)\nprint(person_dagdu)\nprint(\"Person Name:\" + person_dagdu.name)\nprint(\"Person Age:\" + str(person_dagdu.age))\n" }, { "alpha_fraction": 0.703071653842926, "alphanum_fraction": 0.7269624471664429, "avg_line_length": 25.636363983154297, "blob_id": "2e6f009ce8efab7e864ecb247fecb56856c5587e", "content_id": "c5c3e4733f15aac916e1207323274af57e2170da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/01PythonObjectDataStructure/01numbers.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "my_int = 20 # int\nmy_float = 45.5 # float\nmy_complex = 20j # complex\n#Print the values of the above declared varibales\nprint(my_int)\nprint(my_float)\nprint(my_complex)\n#Print the Data Types of the above declared variables\nprint(type(my_int))\nprint(type(my_float))\nprint(type(my_complex))\n" }, { "alpha_fraction": 0.7113401889801025, "alphanum_fraction": 0.7353951930999756, "avg_line_length": 23.16666603088379, "blob_id": "2cc8646554831328d956d62cfc45bface9824c73", 
"content_id": "d30abf4fe872e0a03fe96d34d6e19aa6f067d51c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 88, "num_lines": 12, "path": "/02Strings/indexing_slicing.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Get the character at position 1 (remember that the first character has the position 0):\n\n#Substring. Get the characters from position 2 to position 5 (not included)\n\n\nmy_name = \"Sudhir Sapkal\"\n\n#Indexing can be done on String\nprint(my_name[1])\n\n#Slicing can be done on string\nprint(my_name[2:5])\n\n" }, { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.7285714149475098, "avg_line_length": 33.5, "blob_id": "c8d3e6a128c1b962ebef266be55dc5bf581f7d7a", "content_id": "b1512710de487814e140ed7c19644045c8032b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 42, "num_lines": 2, "path": "/00Variable_OutputFunction/basic_variable.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "\nmy_first_variable=\"Yeah, It's a variable\";\nprint(my_first_variable);\n" }, { "alpha_fraction": 0.660639762878418, "alphanum_fraction": 0.680111289024353, "avg_line_length": 27.760000228881836, "blob_id": "cc0277f895385b225607ba8730df08dd53f439ad", "content_id": "adbaf3c57f32fce119a74f086fa9b7565a0e1ab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 91, "num_lines": 25, "path": "/11MethodAndFunctions/argsANDkargs.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "def myfunc(*args):\n return sum(args) * 0.05\n\nprint(myfunc(50,60))\n\n#args - python treats agrs as tuples\n#args is just name , it can be 
anything , just make sure it should followed with '*' symbol\n\ndef myfuncforkargs(**kwargs):\n print(kwargs)\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\nmyfuncforkargs(fruit=\"apple\",icecream=\"butterscotch\")\n\n#kargs - means it is sending the argubments as key word arguments called as dictonories \n\n#we can use both at same time\n\ndef my_function(*args,**kwargs):\n print('I would like {} {}'.format(args[0],kwargs['food']))\n\nmy_function(10,20,30,food=\"eggs\",fruit=\"apple\")\n" }, { "alpha_fraction": 0.5987260937690735, "alphanum_fraction": 0.6157112717628479, "avg_line_length": 15.206896781921387, "blob_id": "fe97e7af537beb508cd8dfa723740adb42c6069d", "content_id": "dc98d0a9407771d4d5d9739212a4626b3ad0bf57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/11MethodAndFunctions/nestedstatementAndScope.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "x = 20\ndef printer():\n x=10\n return x\n\nprint(x)\n\n#LEGB \n#1. Local(L): Defined inside function/class\n#2. Enclosed(E): Defined inside enclosing functions(Nested function concept)\n#3. Global(G): Defined at the uppermost level\n#4. 
Built-in(B): Reserved names in Python builtin modules\n\n#Globle\nname = \"Global Sudhir\"\n\ndef greet():\n\n #ENCLOSING\n name = \"Enclosing Sudhir\"\n\n def hello():\n #LOCAL\n name = \"LOCAL Sudhir\"\n print(\"Hello \"+name)\n\n hello()\n\ngreet()\n\n" }, { "alpha_fraction": 0.6632503867149353, "alphanum_fraction": 0.6903367638587952, "avg_line_length": 29.35555648803711, "blob_id": "570f1e935ddf1167c845324e65211a446ecca4cf", "content_id": "3769b942a88e6e3767621e762d7bbb4113d18e3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 103, "num_lines": 45, "path": "/05Tuple/01Tuple.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#Create Tuple\nmy_tuple = (\"2\",\"2.55\",\"Sudhir\",2.4,556);\nprint(my_tuple)\n\n#Access item in tuple\nprint(my_tuple[1]);\n\n#Change the tuple value(Once a tuple is created, you cannot change its values. Tuples are unchangeable)\n# my_tuple[1] = \"blackcurrant\"\n# The values will remain the same:\n\n# You can't do following operations on tuble \n # 1. append,\n # 2. insert, \n # 3. delete, \n # 4. remove, \n # 5. pop \n\n#Loop Through a Tuple\n# You can loop through the tuple items by using a for loop.\nbikes_tuple = (\"Triump Tiger\",\"Kawaski Ninza\",\"MV Augusta\",\"Tiger 800 XC\")\nfor bike in bikes_tuple:\n print(bike)\n\n#Check if Item Exists(To determine if a specified item is present in a tuple use the in keyword)\nif \"Triump Tiger\" in bikes_tuple:\n print(\"Yes, 'Triump Tiger' bike is in the bikes tuple\")\n\n#The tuple() Constructor\n#It is also possible to use the tuple() constructor to make a tuple.\n\nbikes_tuple = tuple((\"Triump Tiger\",\"Kawaski Ninza\",\"MV Augusta\",\"Tiger 800 XC\"))\nprint(bikes_tuple)\n\n#Methods\n# 1. 
count(value) -> Returns the number of times a specified value occurs in a tuple\ntemp_tuple = (1, 3, 5, 8, 7, 5, 4, 5, 8, 5)\ncount_of_five = temp_tuple.count(5)\nprint(count_of_five)\n\n#2. index() - Search for the first occurrence of the value 8, and return its position:\nindex_of_eight = temp_tuple.index(8)\nprint(index_of_eight)\n\n#The index() method raises an exception if the value is not found.\n" }, { "alpha_fraction": 0.5794780254364014, "alphanum_fraction": 0.5836299061775208, "avg_line_length": 25.746030807495117, "blob_id": "484fc98734de975d637037b3dc2de0c12d58d8e3", "content_id": "8c3cea3c56c2e1db05504155bc9d2c52d34581b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 64, "num_lines": 63, "path": "/12ObjectOrientedProgramming/09EmplDeptManagement.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "class Employee:\n __id=0\n __name=\"\"\n __gender=\"\"\n __city=\"\"\n __salary=0\n __dept_id=0\n\n def setEmployeeData(self):\n self.__id = int(input(\"Enter Id:\\t\"))\n self.__name = input(\"Enter Name:\\t\")\n self.__gender = input(\"Enter Gender:\\t\")\n self.__city = input(\"Enter City:\\t\")\n self.__salary = int(input(\"Enter Salary:\\t\"))\n self.__dept_id = int(input(\"Enter Department Id:\\t\"))\n \n def showEmployeeData(self):\n print(\"Id\\t\\t:\",self.__id)\n print(\"Name\\t:\", self.__name)\n print(\"Gender\\t:\", self.__gender)\n print(\"City\\t:\", self.__city)\n print(\"Salary\\t:\", self.__salary)\n print(\"Department I\\t:\",self.__dept_id)\n \n\nemployees = list(());\n#print(employees)\nprint(\"Enter Employee Details\")\nfor i in range(1):\n #print(i);\n employee=Employee()\n employee.setEmployeeData()\n employees.append(employee)\n\nfor employee in employees:\n employee.showEmployeeData();\n\nclass Department:\n __id=0\n __name=\"\"\n __emp_count=0\n\n def setDepartmentData(self):\n 
self.__id = int(input(\"Enter Id:\\t\"))\n self.__name = input(\"Enter Name:\\t\")\n self.__emp_count = int(input(\"Enter Employee Count:\\t\"))\n \n def showDepartmentData(self):\n print(\"Id\\t\\t:\",self.__id)\n print(\"Name\\t\\t:\", self.__name)\n print(\"Employee Count\\t:\",self.__emp_count)\n\ndepartments = list(());\n#print(employees)\nprint(\"\\n\\n Enter Department Details\\n\")\nfor i in range(1):\n #print(i);\n department=Department()\n department.setDepartmentData()\n departments.append(department)\n\nfor department in departments:\n department.showDepartmentData();\n\n" }, { "alpha_fraction": 0.5400516986846924, "alphanum_fraction": 0.604651153087616, "avg_line_length": 11.0625, "blob_id": "7aba4bf34b6fd18acadb41a80a9ece9b82b19f76", "content_id": "644dbd1f857ab7db7d27f0dd721ed23a2c4e5521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 23, "num_lines": 32, "path": "/03Operators/special_operator.py", "repo_name": "sudhir-j-sapkal/python-basic-examples", "src_encoding": "UTF-8", "text": "#1. Identity Operator\nx1 = 5\ny1 = 5\nx2 = 'Hello'\ny2 = 'Hello'\nx3 = [1,2,3]\ny3 = [1,2,3]\n\n# Output: False\nprint(x1 is not y1)\n\n# Output: True\nprint(x2 is y2)\n\n# Output: False\nprint(x3 is y3)\n\n#2. Membership operator\nx = 'Hello world'\ny = {1:'a',2:'b'}\n\n# Output: True\nprint('H' in x)\n\n# Output: True\nprint('hello' not in x)\n\n# Output: True\nprint(1 in y)\n\n# Output: False\nprint('a' in y)\n\n" } ]
38
SolSpecSolutions/PyStuffPixAWS
https://github.com/SolSpecSolutions/PyStuffPixAWS
cb35145f06917343cba5e0b502948831fb7e5045
3275a68414cd78ab77595193a97bf6b59cbbbfcf
cff72edf7bdcef2b86beab751e76477b0166fd55
refs/heads/master
2020-03-13T07:57:57.933718
2018-09-20T03:02:14
2018-09-20T03:02:14
131,034,807
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5504950284957886, "alphanum_fraction": 0.5613861680030823, "avg_line_length": 36.407405853271484, "blob_id": "4901fbda1e139afc5e44373b3933d5243d332af8", "content_id": "03a519af387d6f6f7f6f2e516a628e550802eab1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 90, "num_lines": 27, "path": "/suncor_s3download.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "import boto3, botocore, os, pprint\n\n# GLOBAL VARIABLES\nIMAGE_PATH = '/var/www/utils.solspec.solutions/html/suncor/images/'\nBUCKET_NAME = 'suncor-images'\n\n# download recursively, maintaining file structure\ns3 = boto3.client('s3')\ns3objs = s3.list_objects(Bucket=BUCKET_NAME)\n\n# print all file keys\n#for obj in s3objs.get('Contents'):\n# print(obj['Key'])\n\n# create directory structure\nfor obj in s3objs.get('Contents'):\n # only grab JPGs\n if 'JPG' in obj['Key']:\n filename = obj['Key'].split('/')[-1]\n directory = '/'.join(obj['Key'].split('/')[:-1])\n if not os.path.exists(IMAGE_PATH + directory):\n os.makedirs(IMAGE_PATH + directory)\n if not os.path.exists(IMAGE_PATH + obj['Key']):\n s3.download_file(BUCKET_NAME, obj['Key'], IMAGE_PATH + obj['Key'])\n print('Downloaded: ' + filename)\n else:\n print('Skipped: ' + filename)\n" }, { "alpha_fraction": 0.680701732635498, "alphanum_fraction": 0.6877192854881287, "avg_line_length": 26.14285659790039, "blob_id": "2b518d1b4dab5d7908a95f5b6e7908d40f418617", "content_id": "dca1741ef68f732a9f9645aa8c2405e817edeeab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "no_license", "max_line_length": 95, "num_lines": 21, "path": "/ConvertTGStoCSV_SameFolder.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\n#Import Modules\nimport pandas as pd\nimport csv\nimport 
os\n\n#Variables\n#procFolder = raw_input(\"Enter Project Folder Path: \")\nprocFolder = \"G:\\ImageProcessing\\TGS\"\n\n#Load .tgs files into an array\ntgsFiles = os.listdir(procFolder)\n\n#Create .csv file from .tgs in the same folder\nfor file in tgsFiles:\n #print file\n baseFilename = os.path.splitext(file)[0]\n inFile = pd.read_csv(procFolder + \"/\" + file, header=None)\n inFile[0] = inFile[0] + '.jpg'\n inFile.to_csv(procFolder + \"/\" + baseFilename + \".csv\", sep=\",\", index=False, header=False)\n" }, { "alpha_fraction": 0.67641681432724, "alphanum_fraction": 0.6837294101715088, "avg_line_length": 26.350000381469727, "blob_id": "265c8989a353ec60c83fb5a14a091b727f70ba13", "content_id": "ea2e74c3f7d8d746466f048d649fa9f1d7d8855d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 76, "num_lines": 20, "path": "/ConvertTGStoCSV_Subfolders.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\n#Import Modules\nimport pandas as pd\nimport csv\nimport os\n\n#Variables\npFolder = \"G:\\ImageProcessing\\TGS\"\n\n#Load subfolders into an array\ntgsFolders = os.listdir(pFolder)\n\n#Loop through subfolders and create .csv file from .tgs in the subfolder\nfor folder in tgsFolders:\n file = pFolder + \"\\\\\" + folder + \"\\\\\" + folder + \".tgs\"\n baseFilename = os.path.splitext(file)[0]\n inFile = pd.read_csv(file, header=None)\n inFile[0] = inFile[0] + '.jpg'\n inFile.to_csv(baseFilename + \".csv\", sep=\",\", index=False, header=False)\n" }, { "alpha_fraction": 0.5944253206253052, "alphanum_fraction": 0.6169746518135071, "avg_line_length": 39.93589782714844, "blob_id": "0f9a3989b694ebeb31abc7870d9d021a27c80997", "content_id": "2196f54d3ac2f4efa820fb295f591089f5397c86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3193, "license_type": 
"no_license", "max_line_length": 126, "num_lines": 78, "path": "/pix_process_remote.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 12:06:19 2018\n\n@author: michaelokonski\n\"\"\"\n\nimport paramiko\nimport os\n\n#To do...tie in grab stopped ec2 script. Paths are here for testing\ns3_project_folder = 'solspec-test-mo/20180227_54037'\n\npix_server = 'ec2-18-236-246-157.us-west-2.compute.amazonaws.com'\n\ndef pix_process(*args):\n #print(s3_project_folder)\n \n #Connect via SSH and run commands\n #Change this to local key file\n aws_private_key_file = \"/Users/michaelokonski/Dropbox/SolSpec/Solspec_Logins/Pix4DProcessingServers.pem\"\n k = paramiko.RSAKey.from_private_key_file(aws_private_key_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect( hostname = pix_server, username = 'ubuntu', pkey = k )\n print('Sucessfully connected to ' + pix_server + ', ' + 'Pix4D Linux Process Server ' + pix_server)\n\n bucket_name = s3_project_folder.split(os.sep)[0]\n parent_folder = s3_project_folder.split(os.sep)[1]\n \n #Preparing payload\n #Create working directory on local\n stdin , stdout, stderr = client.exec_command('mkdir -p ' + s3_project_folder)\n print(\"stdout: \" + stdout.read())\n print(\"stderr: \" + stderr.read())\n \n #Sync to local\n stdin , stdout, stderr = client.exec_command('aws s3 ls ' + s3_project_folder)\n print('syncing from s3 to local.........................................................')\n print(\"stdout: \" + stdout.read() + 'Sync from s3 to local completed successfully.')\n print(\"stderr: \" + stderr.read())\n \n #Sync to local server\n stdin , stdout, stderr = client.exec_command('mkdir -p ' + s3_project_folder)\n print(\"stdout: \" + stdout.read())\n print(\"stderr: \" + stderr.read())\n\n stdin , stdout, stderr = client.exec_command('aws s3 sync s3://' + 
s3_project_folder + ' ' + s3_project_folder)\n print('syncing from s3 to local.........................................................')\n print(\"stdout: \" + stdout.read() + 'Sync from s3 to local completed successfully.')\n print(\"stderr: \" + stderr.read())\n \n #Pix4D Stuff\n #Login\n stdin , stdout, stderr = client.exec_command('pix4dmapper -c --email [email protected] --password H2@2014!')\n print(\"stdout: \" + stdout.read())\n print(\"stderr: \" + stderr.read())\n \n #Create project\n start_text = 'pix4dmapper -c -n --image-dir '\n out_path = 'process_jobs/' #Local process server workspace\n\n stdin , stdout, stderr = client.exec_command(start_text + s3_project_folder + ' ' + out_path + s3_project_folder + '.p4d')\n print(\"stdout: \" + stdout.read() + '............Project created successfully............')\n print(\"stderr: \" + stderr.read())\n \n #Run project\n stdin , stdout, stderr = client.exec_command('pix4dmapper -c -r ' + out_path + bucket_name + '/' + parent_folder + '.p4d')\n print('Project started at: ' + stdout.read())\n print(\"stderr: \" + stderr.read())\n \n #Sync to s3\n stdin , stdout, stderr = client.exec_command('cd ' + bucket_name + ' && ' + 'aws s3 sync . 
s3://' + bucket_name)\n print(\"stdout: \" + stdout.read())\n print(\"stderr: \" + stderr.read())\n \npix_process()\n" }, { "alpha_fraction": 0.6152832508087158, "alphanum_fraction": 0.6966403126716614, "avg_line_length": 40.20814514160156, "blob_id": "2c0e1ca5aacda7422d389dd0b6af62aec7c39f99", "content_id": "385efcb3635bea956f33716e8b6d5322e60b3065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9108, "license_type": "no_license", "max_line_length": 181, "num_lines": 221, "path": "/pix_boto_ssh_process.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 12:31:53 2018\n\n@author: michaelokonski\n\"\"\"\n\nimport boto3\nimport paramiko\n#import time\nimport os\nimport io\nimport pandas as pd\n\n#To do......describe aws instances and lift all DNS from response\n\n#Linux Server List. Add additional instances below and make sure to add to conditional below.\nlinux_1_private_dns = 'ip-172-31-47-242.us-west-2.compute.internal'\nlinux_2_private_dns = 'ip-172-31-46-248.us-west-2.compute.internal'\nlinux_3_private_dns = 'ip-172-31-45-161.us-west-2.compute.internal'\nlinux_4_private_dns = 'ip-172-31-47-73.us-west-2.compute.internal'\nlinux_5_private_dns = 'ip-172-31-45-185.us-west-2.compute.internal'\nlinux_6_private_dns = 'ip-172-31-42-27.us-west-2.compute.internal'\nlinux_7_private_dns = 'ip-172-31-40-20.us-west-2.compute.internal'\nlinux_8_private_dns = 'ip-172-31-46-54.us-west-2.compute.internal'\nlinux_9_private_dns = 'ip-172-31-40-175.us-west-2.compute.internal'\nlinux_10_private_dns = 'ip-172-31-33-58.us-west-2.compute.internal'\nlinux_11_private_dns = 'ip-172-31-46-249.us-west-2.compute.internal'\nlinux_12_private_dns = 'ip-172-31-35-228.us-west-2.compute.internal'\nlinux_13_private_dns = 'ip-172-31-46-124.us-west-2.compute.internal'\nlinux_14_private_dns = 
'ip-172-31-37-229.us-west-2.compute.internal'\nlinux_15_private_dns = 'ip-172-31-33-1.us-west-2.compute.internal'\nlinux_16_private_dns = 'ip-172-31-33-1.us-west-2.compute.internal'\nlinux_17_private_dns = 'ip-172-31-34-193.us-west-2.compute.internal'\nlinux_18_private_dns = 'ip-172-31-41-135.us-west-2.compute.internaal'\nlinux_19_private_dns = 'ip-172-31-41-171.us-west-2.compute.internal'\nlinux_20_private_dns = 'ip-172-31-42-67.us-west-2.compute.internal'\n\nlinux_7_public_dns = 'ec2-54-244-133-194.us-west-2.compute.amazonaws.com'\nlinux_8_public_dns = 'ec2-54-186-251-208.us-west-2.compute.amazonaws.com'\nlinux_9_public_dns = 'ec2-54-212-234-43.us-west-2.compute.amazonaws.com'\nlinux_20_public_dns = 'ec2-54-214-222-14.us-west-2.compute.amazonaws.com'\n\n#Choose server\npix_server = raw_input('Enter a server number between 1 and 20: ')\n\nif pix_server == '1':\n selected_server = linux_1_private_dns\nelif pix_server == '2':\n selected_server = linux_2_private_dns\nelif pix_server == '3':\n selected_server = linux_3_private_dns\nelif pix_server == '4':\n selected_server = linux_4_private_dns\nelif pix_server == '5':\n selected_server = linux_5_private_dns\nelif pix_server == '6':\n selected_server = linux_6_private_dns\nelif pix_server == '7':\n selected_server = linux_7_private_dns\n \n#elif pix_server == '8':\n# selected_server = linux_8_private_dns\n#elif pix_server == '9':\n# selected_server = linux_9_private_dns \n\n#Public instances names for testing\nelif pix_server == '7':\n selected_server = linux_7_public_dns \nelif pix_server == '8':\n selected_server = linux_8_public_dns\nelif pix_server == '9':\n selected_server = linux_9_public_dns\nelif pix_server == '20':\n selected_server = linux_20_public_dns\n\n\nelif pix_server == '10':\n selected_server = linux_10_private_dns\nelif pix_server == '11':\n selected_server = linux_11_private_dns\nelif pix_server == '12':\n selected_server = linux_12_private_dns\nelif pix_server == '13':\n selected_server = 
linux_13_private_dns\nelif pix_server == '14':\n selected_server = linux_14_private_dns\nelif pix_server == '15':\n selected_server = linux_15_private_dns\nelif pix_server == '16':\n selected_server = linux_16_private_dns\nelif pix_server == '17':\n selected_server = linux_17_private_dns\nelif pix_server == '18':\n selected_server = linux_18_private_dns\nelif pix_server == '19':\n selected_server = linux_19_private_dns\n#elif pix_server == '20':\n# selected_server = linux_20_private_dns\n\n\nelse:\n selected_server = None\n print('Invalid choice. Please run again and choose a valid number between 1 and 20.')\n\nlinux_1_instance_id = 'i-04adbf6bd736ccfc2'\nlinux_2_instance_id = 'i-077015546d5be1c59'\nlinux_3_instance_id = 'i-0aec30aefce006a7d'\nlinux_4_instance_id = 'i-0be35c9980cc4deed'\nlinux_5_instance_id = 'i-0f06c4a2d9d7c63e5'\nlinux_6_instance_id = 'i-0da998439610ade74'\nlinux_7_instance_id = 'i-02173d786cd322233'\nlinux_8_instance_id = 'i-070df12df86332a0a'\nlinux_9_instance_id = 'i-094cf906d9e93cb65'\nlinux_10_instance_id = 'i-0bac8ae67e32657f8'\nlinux_11_instance_id = 'i-0c86f92788b1cf247'\nlinux_12_instance_id = 'i-0d278c6c9e882d237'\nlinux_13_instance_id = 'i-0039ff54eaea0e29b'\nlinux_14_instance_id = 'i-01e3fccdec28e922c'\nlinux_15_instance_id = 'i-02072d30e2a663577'\nlinux_16_instance_id = 'i-05cb95a18a70cee96'\nlinux_17_instance_id = 'i-06d9d376a9e783d6a'\nlinux_18_instance_id = 'i-0a8082c89b968e252'\nlinux_19_instance_id = 'i-0b118b6e07d098809'\nlinux_20_instance_id = 'i-0c6dd1d5c938305be'\n\n#Start EC2 instance\n#client = boto3.client('ec2')\n#response = client.start_instances(InstanceIds=['i-0c86f92788b1cf247'], AdditionalInfo='string', DryRun=False)\n\n#Create new instancs(s) using AMI id ami-e806df90 (Linux Processing Server)????????\n\n#Connect via SSH and run commands\n#Change this to local key file\naws_private_key_file = \"/Users/michaelokonski/Dropbox/SolSpec/Solspec_Logins/Pix4DProcessingServers.pem\"\nk = 
paramiko.RSAKey.from_private_key_file(aws_private_key_file)\nc = paramiko.SSHClient()\nc.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nc.connect( hostname = selected_server, username = 'ubuntu', pkey = k )\nprint('Sucessfully connected to ' + selected_server + ', ' + 'Pix4D Linux Process Server ' + pix_server)\n\n##Payload Variables\ns3_project_folder = raw_input('Enter the full key value in s3: ')\nbucket_name = s3_project_folder.split(os.sep)[0]\nparent_folder = s3_project_folder.split(os.sep)[1]\n\n#s3 out location\n#out_location = raw_input('Enter the full bucket/key path/directory where you want the project written to: ')\n\n#Convert geolocation file. Needs a local workspace as s3 objects are immutable\next = '.csv'\ns3_resource = boto3.resource('s3')\ns3_client = boto3.client('s3')\nlocal_temp_folder = '/Users/michaelokonski/Desktop/solspec-test-mo/20180227_54037'\n\ndef pd_read_csv_s3(path, *args, **kwargs):\n path = path.replace('s3://', '')\n bucket, key = path.split('/', 1)\n obj = s3_client.get_object(Bucket=bucket, Key=key)\n return pd.read_csv(io.BytesIO(obj['Body'].read()), *args, **kwargs)\n\nfile = pd_read_csv_s3(s3_project_folder + '/' + parent_folder + '.tgs', header=None)\nfile[0] = file[0] + '.jpg'\n\noutfile = local_temp_folder + '/' + parent_folder + ext\nwrite_outfile = file.to_csv(outfile, sep=',', index=False, header=False)\ns3_client.upload_file(outfile, bucket_name, parent_folder + '/' + parent_folder + ext)\n\n#Remove file from local system\nos.remove(outfile)\nprint('Geolocation file conversion completed and copied successfully.')\n\n#Sync to local server\nstdin , stdout, stderr = c.exec_command('mkdir -p ' + s3_project_folder)\nprint(\"stdout: \" + stdout.read())\nprint(\"stderr: \" + stderr.read())\n\n#To do......change buffer size on console ssh output. 
Currently waits until operation has complete to print to console.\n\nstdin , stdout, stderr = c.exec_command('aws s3 sync s3://' + s3_project_folder + ' ' + s3_project_folder)\nprint('syncing from s3 to local.........................................................')\nprint(\"stdout: \" + stdout.read() + 'Sync from s3 to local completed successfully.')\nprint(\"stderr: \" + stderr.read())\n\n##Pix4D starts here \nstdin , stdout, stderr = c.exec_command('pix4dmapper -c --email [email protected] --password H2@2014!')\nprint(\"stdout: \" + stdout.read())\nprint(\"stderr: \" + stderr.read())\n\nstart_text = 'pix4dmapper -c -n --image-dir '\ngeo_file_text = ' --geolocation-format pix4d-lat-long --geolocation-file '\nout_path = 'process_jobs/'\n\nstdin , stdout, stderr = c.exec_command(start_text + s3_project_folder + geo_file_text + s3_project_folder + '/' + parent_folder + ext + ' ' + out_path + s3_project_folder + '.p4d')\nprint(\"stdout: \" + stdout.read() + '............Project created successfully............')\nprint(\"stderr: \" + stderr.read())\n\n##Run project\nstdin , stdout, stderr = c.exec_command('pix4dmapper -c -r ' + out_path + s3_project_folder.split(os.sep)[0] + '/' + parent_folder + '.p4d')\nprint('Project started at: ' + stdout.read())\nprint(\"stderr: \" + stderr.read())\n\nstdin , stdout, stderr = c.exec_command('cd' + ' ' + bucket_name + ' && ' + 'pwd' + ' && ' + 'aws s3 sync . s3://' + bucket_name)\nprint(\"stdout: \" + stdout.read())\nprint(\"stderr: \" + stderr.read())\n\n#Sync to out_location\nstdin , stdout, stderr = c.exec_command('aws s3 sync . s3://' + bucket_name)\nprint('syncing from local to s3.........................................................')\nprint(\"stdout: \" + stdout.read() + 'Sync from local to s3 completed successfully.')\nprint(\"stderr: \" + stderr.read())\n\n#To do......sync/cp derivative folders to discrete s3 or ec2 locations e.g. 
ortho to ortho folder/key, 3d folder to 3d folder out etc.\n#c.close()\n\n#Stop EC2 instance\n#client = boto3.client('ec2')\n#response = client.start_instances(InstanceIds=['i-0c86f92788b1cf247'], AdditionalInfo='string', DryRun=False)\n\n#Terminate EC2 instance?????????\n\n" }, { "alpha_fraction": 0.6535113453865051, "alphanum_fraction": 0.6720835566520691, "avg_line_length": 33.875, "blob_id": "b365854c6354e5200df618a5bf83a2f9cdb50979", "content_id": "42371cc8f4ec54fa5c0a7785506d3a0ffcfbeeea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3446, "license_type": "no_license", "max_line_length": 123, "num_lines": 96, "path": "/MasterPixScript.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\n#Import libraries\r\nimport boto3\r\nimport subprocess\r\nimport time\r\nimport paramiko\r\nfrom scp import SCPClient\r\n\r\n#Variables\r\ns3In = raw_input(\"Enter full Project path: \")\r\nkey = paramiko.RSAKey.from_private_key_file(\"/home/ubuntu/Documents/Pix4DProcessingServers.pem\")\r\nconn = paramiko.SSHClient()\r\nconn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\nuser = \"ubuntu\"\r\nlclPath = '/home/ubuntu/Documents/Scripts/'\r\n#script1 = 'LS_NoGeoFile_PixScript_v2.0.sh'\r\n#script2 = 'LS_GeoFile_PixScript_v2.0.sh'\r\nremPath = '/home/ubuntu/pix4d/'\r\n#commands = [\"echo $PATH\", \"cat /etc/fstab\"]\r\n\r\n#Determine which script to use - drone or waldo\r\nif \"drone\" in s3In:\r\n\tscript = 'LS_NoGeoFile_PixScript_v2.1.sh'\r\nelif \"waldo\" in s3In:\r\n\tscript = 'LS_GeoFile_PixScript_v2.1.sh'\r\nelse:\r\n\tprint(\"Project path is not correct. 
Please use correct Drone or Waldo folder path.\")\r\n\texit(1)\r\n\r\n#Query AWS for all available Processing server (by Role tag and state code = 80 - stopped)\r\nec2 = boto3.resource('ec2')\r\ninstances = ec2.instances.filter(\r\n Filters=[\r\n {'Name': 'tag:Role', 'Values': ['ProcessingTest']},\r\n {'Name': 'instance-state-code', 'Values': ['80']}\r\n ]\r\n)\r\n\r\n#Read variable into a list (should be able to query servers directly into list)\r\navailable = []\r\nfor instance in instances:\r\n available.append(instance)\r\n\r\n#Verify list isn't empty and set the Instance variable to first server in the list\r\nif len(available) == 0:\r\n print(\"No servers are available at this time\")\r\n exit(1)\r\nelse:\r\n server = available[0] #This variable allows it to be using in the local processing script\r\n inst = server.instance_id\r\n instDNSName = server.private_dns_name\r\n for tag in server.tags:\r\n if 'ServerName' in tag['Key']:\r\n serverName = tag['Value']\r\n #print(serverName)\r\n connStr = \"ubuntu@\" + instDNSName\r\n\r\n#commands = {\"sudo chmod +x \" \"{0}{1}\".format(remPath, script),\r\n# \"{0}{1} {2} {3} {4} {5}\".format(remPath, script, s3In, inst, script, serverName)}\r\n\r\nprint(\"Starting server \" + serverName)\r\nsubprocess.call(['aws', 'ec2', 'start-instances', '--instance-ids', inst])\r\n\r\n#Wait 2 mins for server to start - rewrite to loop through state code until \"running\"\r\nprint(\"Waiting 2 mins for server to start\")\r\ntime.sleep(120)\r\n\r\nconn.connect(hostname = instDNSName, username = user, pkey = key)\r\nprint(\"Copying local processing scripts to remote server\")\r\nwith SCPClient(conn.get_transport()) as scp:\r\n scp.put(lclPath + script, remPath + script)\r\n #scp.put(lclPath + script2, remPath + script2)\r\n\r\nprint(\"Make local script executable\")\r\nprint(\"Executing {}\".format(\"sudo chmod +x \" \"{0}{1}\".format(remPath, script)))\r\nstdin, stdout, stderr = conn.exec_command(\"sudo chmod +x \" 
\"{0}{1}\".format(remPath, script))\r\nprint stdout.read()\r\nprint(\"Errors\")\r\nprint stderr.read()\r\n\r\nprint(\"Executing script on remote Pix server\")\r\nprint(\"Executing {}\".format(\"{0}{1} {2} {3} {4} {5}\".format(remPath, script, s3In, inst, script, serverName)))\r\nstdin, stdout, stderr = conn.exec_command(\"{0}{1} {2} {3} {4} {5}\".format(remPath, script, s3In, inst, script, serverName))\r\nprint stdout.read()\r\nprint(\"Errors\")\r\nprint stderr.read()\r\n\r\n#for command in commands:\r\n# print \"Executing {}\".format(command)\r\n# stdin, stdout, stderr = conn.exec_command(command)\r\n# print stdout.read()\r\n# print(\"Errors\")\r\n# print stderr.read()\r\nconn.close()\r\nexit(0)\r\n\r\n" }, { "alpha_fraction": 0.6195722818374634, "alphanum_fraction": 0.6351263523101807, "avg_line_length": 22.044776916503906, "blob_id": "5eee07ea8fe7aa05f962bf158da734563bf9458f", "content_id": "58b2bc815948216edcf8748ab9e6f5acfdd24c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1543, "license_type": "no_license", "max_line_length": 63, "num_lines": 67, "path": "/lambda_unzip_s3.py", "repo_name": "SolSpecSolutions/PyStuffPixAWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 2 11:54:46 2018\n\nLambda function to unzip in s3 from trigger event\n\n@author: michaelokonski\n\"\"\"\n\nimport os\nimport tempfile\nimport zipfile\n\nfrom concurrent import futures\nfrom io import BytesIO\n\nimport boto3\n\ns3 = boto3.client('s3')\n\n\ndef lambda_handler(event, context):\n # Parse and prepare args from event\n global bucket, path, zipdata\n event = next(iter(event['Records']))\n bucket = event['s3']['bucket']['name']\n key = event['s3']['object']['key']\n path = os.path.dirname(key)\n\n # Create temporary file\n temp_file = tempfile.mktemp()\n\n # Fetch and load target file\n s3.download_file(bucket, key, temp_file)\n zipdata = 
zipfile.ZipFile(temp_file)\n\n #Method acts xtract to key\n with futures.ThreadPoolExecutor(max_workers=4) as executor:\n future_list = [\n executor.submit(extract, filename)\n for filename in zipdata.namelist()\n ]\n\n result = {'success': [], 'fail': []}\n for future in future_list:\n filename, status = future.result()\n result[status].append(filename)\n\n # Remove extracted archive file\n s3.delete_object(Bucket=bucket, Key=key)\n\n return result\n\n\ndef extract(filename):\n upload_status = 'success'\n try:\n s3.upload_fileobj(\n BytesIO(zipdata.read(filename)),\n bucket,\n os.path.join(path, filename)\n )\n except Exception:\n upload_status = 'fail'\n finally:\n return filename, upload_status" } ]
7
MegAjith/MegAjith.github.io
https://github.com/MegAjith/MegAjith.github.io
c8079e6f5f3fa47d694da1bd44453fbd3e5fabda
e212f7959f75cccf79491144a97294fdfff87274
9df0a1135bd42ea818f3d3697a2456fba6f8ba64
refs/heads/main
2023-05-02T04:10:51.777260
2021-05-11T11:03:43
2021-05-11T11:03:43
365,746,571
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6145397424697876, "alphanum_fraction": 0.6229079365730286, "avg_line_length": 29.344263076782227, "blob_id": "ae81915adddb1f9ddfde4e254b4ad3f797bfa806", "content_id": "b1dc79651feb52b53253fc6c04f63739a276b807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1912, "license_type": "no_license", "max_line_length": 83, "num_lines": 61, "path": "/app.py", "repo_name": "MegAjith/MegAjith.github.io", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\nimport numpy as np\r\nimport os \r\nimport keras\r\nfrom flask import Flask, render_template, request\r\nfrom werkzeug.utils import secure_filename\r\nfrom werkzeug.datastructures import FileStorage\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport json\r\nimport glob\r\nimport numpy as np\r\nimport shutil\r\nfrom keras.models import model_from_json\r\n\r\n\r\nwith open('/Users/megha/Projects/model_architecture.json', 'r') as json_file:\r\n model = model_from_json(json_file.read())\r\nmodel.load_weights('model_weights.h5')\r\nmodel.compile(optimizer= 'adam', loss= 'binary_crossentropy', metrics=['accuracy'])\r\n\r\n\r\napp = Flask(__name__)\r\n \r\napp.config['UPLOAD_FOLDER'] = 'C:/Users/megha/Projects/uploaded/image/'\r\n \r\[email protected]('/')\r\ndef upload_f():\r\n return render_template('upload.html')\r\n \r\ndef finds():\r\n test_datagen = ImageDataGenerator(rescale = 1./255)\r\n test_dir = 'uploaded'\r\n test_generator = test_datagen.flow_from_directory(\r\n test_dir,\r\n target_size =(250, 400),\r\n color_mode =\"rgb\",\r\n shuffle = False,\r\n class_mode ='binary',\r\n batch_size = 1)\r\n pred = model.predict(test_generator)\r\n print(pred)\r\n return pred\r\n #return str(vals[np.argmax(pred)])\r\n \r\[email protected]('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n print(f.filename)\r\n 
cwd=os.path.join(os.getcwd(),\"uploaded\",\"image\")\r\n f.save(os.path.join(cwd,secure_filename(f.filename)))\r\n try:\r\n val = finds()\r\n except Exception as e:\r\n val=e\r\n shutil.rmtree('C:/Users/megha/Projects/uploaded/image/')\r\n os.mkdir('C:/Users/megha/Projects/uploaded/image/')\r\n return render_template('pred.html', ss = \"%.2f\" % (val.item()*100))\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 17.5, "blob_id": "650fca528f3e44b4beab293d1ba7955e30f829d4", "content_id": "1cd462d9c13c33900ed1583671e993e413526385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "MegAjith/MegAjith.github.io", "src_encoding": "UTF-8", "text": "# MegAjith.github.io\nProject website\n" } ]
2
ellsharp/osuRippleStats
https://github.com/ellsharp/osuRippleStats
6b6195933a0f42a12eee1327aff1643821963edf
4a448c674522e81666b06b94dc7a33bc73137d86
d5fc748217cb1fa517f8b4d97f743d8d81741a47
refs/heads/master
2020-04-07T05:17:08.368741
2019-02-11T04:07:19
2019-02-11T04:07:19
158,090,472
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5754325985908508, "alphanum_fraction": 0.5890811681747437, "avg_line_length": 47.27058792114258, "blob_id": "80368fdfa8bb4d8297d2972151f1abc825bf7c05", "content_id": "10ebbe64f8dcf3e604099ab7142339d8a1f2280b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4103, "license_type": "no_license", "max_line_length": 161, "num_lines": 85, "path": "/ors/main/users_activity.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script.ripple_api import RippleApi\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_activity import UsersActivity\n UsersActivity().execute()\n\nclass UsersActivity(object):\n global log\n global database\n global connection\n log = logger.logger('users_activity')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersActivity')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n self.__set_users_activity(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersActivity')\n except Exception as e:\n log.critical('ORSC0001', 'UsersActivity', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __set_users_activity(self, user_id):\n ripple_api = RippleApi()\n result = database.execute_statement(connection, 't_users_activity_S02', user_id)\n transact_scores = result[1]\n counter = 0\n for transact_score in transact_scores:\n leaderboard_scores = ripple_api.get_leaderboard(transact_score['beatmap_md5'], transact_score['play_mode'])\n leaderboard_scores = leaderboard_scores['scores']\n transact_score_score = transact_score['score']\n 
transact_score_score_id = transact_score['score_id']\n transact_score_user_id = transact_score['user_id']\n ranking = 1\n # Fixed issue #1\n if (leaderboard_scores != None):\n for leaderboard_score in leaderboard_scores:\n # Get score's ranking.\n leaderboard_score_score = leaderboard_score['score']\n leaderboard_score_score_id = leaderboard_score['id']\n leaderboard_score_user_id = leaderboard_score['user']['id']\n if leaderboard_score_score_id == transact_score_score_id:\n break\n elif leaderboard_score_user_id == transact_score_user_id:\n pass\n elif transact_score_score > leaderboard_score_score:\n break\n else:\n ranking = ranking + 1\n else:\n ranking = -1\n # Set users activity.\n beatmap_md5 = transact_score['beatmap_md5']\n result = database.execute_statement(connection, 't_users_activity_S03', beatmap_md5)\n activity_score = result[1][0]['score']\n if (activity_score == None or activity_score < transact_score['score']):\n result = database.execute_statement(connection, 'm_beatmaps_S02', beatmap_md5)\n beatmap_id = result[1][0]['beatmap_id']\n song_name = result[1][0]['song_name']\n activity = converter.convert_activity(transact_score, beatmap_id, song_name, ranking)\n result = database.execute_statement_values(connection, 't_users_activity_I01', activity.values())\n log.debug('ORSD0014', transact_score['user_id'], transact_score['score_id'], song_name, transact_score['score'], transact_score['rank'], ranking)\n # Mark the score has processed on transaction.\n result = database.execute_statement(connection, 'l_scores_on_activity_I01', user_id, transact_score_score_id, 1, transact_score['created_on'])\n else:\n # Mark the score has processed on transaction.\n result = database.execute_statement(connection, 'l_scores_on_activity_I01', user_id, transact_score_score_id, 3, transact_score['created_on'])\n counter = counter + 1\n log.info('ORSI0011', counter, user_id)\n" }, { "alpha_fraction": 0.6042850613594055, "alphanum_fraction": 0.6125929355621338, 
"avg_line_length": 29.49333381652832, "blob_id": "4dd2913a06d13d25dd3537a60156b5ecb5e2e5d6", "content_id": "f833f8eeeb03fcb49b02fa26af170d76a1142d0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2287, "license_type": "no_license", "max_line_length": 82, "num_lines": 75, "path": "/ors/script/database_old.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import re\nimport sys\nimport pymysql\nfrom ors.script import util\nfrom ors.script import logger\nlog = logger.logger('Database')\n\ndef get_connection():\n config = util.read_config()\n host = config['database']['host']\n db = config['database']['db']\n user = config['database']['user']\n password = config['database']['password']\n charset = config['database']['charset']\n try:\n connection = pymysql.connect(\n host=host,\n db=db,\n user=user,\n password=password,\n charset=charset,\n cursorclass=pymysql.cursors.DictCursor\n )\n except Exception as e:\n print(e)\n sys.exit(1)\n return connection\n\ndef execute_statement(sql_name, *parameters):\n global log\n sql_path = 'sql/' + sql_name + '.sql'\n sql_file = open(sql_path, 'r')\n sql = sql_file.read()\n statement = sql % parameters\n log.debug('ORSD0004', compress_statement(statement))\n try:\n connection = get_connection()\n cursor = connection.cursor()\n count = cursor.execute(statement)\n result = cursor.fetchall()\n connection.commit()\n cursor.close()\n connection.close()\n except Exception as e:\n # When failed to commit statement, rollback table and system abnormal end.\n connection.rollback()\n log.critical('ORSC0001', 'execute_statement', e)\n sys.exit(1)\n return [count, result]\n\ndef execute_statement_values(sql_name, values):\n global log\n sql_path = 'sql/' + sql_name + '.sql'\n sql_file = open(sql_path, 'r')\n sql = sql_file.read()\n statement = sql % tuple(values)\n log.debug('ORSD0004', compress_statement(statement))\n try:\n connection = get_connection()\n 
cursor = connection.cursor()\n count = cursor.execute(statement)\n result = cursor.fetchall()\n connection.commit()\n cursor.close()\n connection.close()\n except Exception as e:\n # When failed to commit statement, rollback table and system abnormal end.\n connection.rollback()\n log.critical('ORSC0001', 'execute_statement', e)\n sys.exit(1)\n return [count, result]\n\ndef compress_statement(statement):\n statement = re.sub(r\"\\s+\", \" \", statement)\n return statement\n" }, { "alpha_fraction": 0.6743119359016418, "alphanum_fraction": 0.6743119359016418, "avg_line_length": 12.625, "blob_id": "4f26c4ce3ab6a0d5211c787f2c0832cfe3a29822", "content_id": "388203d965c61a8b3e2d655e9ba1c0a7607cc22e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 218, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/sql/t_users_badges_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n t_users_badges\nSELECT\n *\nFROM\n w_users_badges work\nWHERE\n work.user_id = %s\nAND NOT EXISTS(\n SELECT\n *\n FROM\n t_users_badges transaction\n WHERE\n transaction.created_on = work.created_on\n);\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 7.142857074737549, "blob_id": "08d0c210e8fa0a7abb4674b74fdd7dbc10f3c0cc", "content_id": "9b056c216a2c7ae9d05ad9b23044e99a68d0735b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 57, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/sql/m_first_place_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n m_first_place\nWHERE\n user_id = '%s'\n;\n" }, { "alpha_fraction": 0.56611567735672, "alphanum_fraction": 0.5798898339271545, "avg_line_length": 43, "blob_id": "be5b39af151cfe4b2110a4f708c2ef573a5faae3", "content_id": 
"337a718ff6f69b344f59da823eb4310a364a672c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2904, "license_type": "no_license", "max_line_length": 131, "num_lines": 66, "path": "/ors/main/users_stats_monthly.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import logger\nfrom ors.script import util\n\nif __name__ == \"__main__\":\n from ors.main.users_stats_monthly import UsersStatsMonthly\n UsersStatsMonthly().execute()\n\nclass UsersStatsMonthly(object):\n global log\n global database\n global connection\n log = logger.logger('users_stats_monthly')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n process_name = 'UsersStatsMonthly'\n try:\n log.info('ORSI0001', process_name)\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n self.__set_users_stats_monthly(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', process_name)\n except Exception as e:\n log.critical('ORSC0001', process_name, e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __set_users_stats_monthly(self, user_id):\n process_month = util.datetime_now().strftime('%Y-%m')\n # Get how long stats are exists on transaction table.\n result = database.execute_statement(connection, 't_users_stats_S02', user_id)\n if (result[0] < 2):\n pass\n else:\n months = result[1];\n for month in months:\n month = month['created_on']\n result = database.execute_statement(connection, 't_users_stats_monthly_S01', month, user_id)\n count = result[1][0]['count']\n if (count == 1 and process_month != month) :\n pass\n else:\n result = database.execute_statement(connection, 
't_users_stats_monthly_S02', month, user_id, user_id)\n month_latest_stats = result[1][0]\n result = database.execute_statement(connection, 't_users_stats_monthly_S03', month, user_id, user_id)\n month_oldest_stats = result[1][0]\n monthly_stats = converter.convert_monthly_stats(month, month_latest_stats, month_oldest_stats)\n if (count == 0):\n result = database.execute_statement_values(connection, 't_users_stats_monthly_I01', monthly_stats.values())\n elif (count == 1 and process_month == month):\n del monthly_stats['created_on']\n monthly_stats.update(user_id_key=user_id)\n monthly_stats.update(month_key=month)\n result = database.execute_statement_values(connection, 't_users_stats_monthly_U01', monthly_stats.values())\n" }, { "alpha_fraction": 0.5251747965812683, "alphanum_fraction": 0.527039647102356, "avg_line_length": 35.35593032836914, "blob_id": "da4acb56932b4fee950379785041e1c97380eb9a", "content_id": "1035d6c5f382907c4d2b679ebaa1766da82a3874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4290, "license_type": "no_license", "max_line_length": 87, "num_lines": 118, "path": "/www/manage/portal.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n require_once './functions.php';\n require_logined_session();\n header('Content-Type: text/html; charset=UTF-8');\n?>\n<html>\n<head>\n <title>ORS Management Portal</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/semantic/dist/semantic.min.css\">\n <script src=\"/semantic/dist/semantic.min.js\"></script>\n</head>\n<body>\n <div class=\"ui container\">\n <div class=\"ui menu\">\n <div class=\"right item\">\n <h1 class=\"ui header center aligned\">ORS Management Portal</h1>\n </div>\n <div class=\"right item\">\n <a href=\"/manage/logout.php?token=<?=h(generate_token()) ?>\">\n <div class=\"ui button\">Log out</div>\n </a>\n </div>\n </div>\n <div class=\"ui three column relaxed grid\">\n <div class=\"column\">\n 
<div class=\"ui red attached center aligned segment\">\n <div class=\"ui large header\">\n <i class=\"tiny newspaper outline icon\"></i>Score\n </div>\n </div>\n <div class=\"ui attached segment\">\n <form class=\"ui form\" method=\"post\" action=\"/manage/portal.php\">\n <div class=\"ui fluid icon input\">\n <input type=\"text\" name=\"score_id\" value=\"\" placeholder=\"score_id\">\n <i class=\"search icon\"></i>\n </div>\n </form>\n </div>\n </div>\n <div class=\"column\">\n <div class=\"ui blue attached center aligned segment\">\n <div class=\"ui large header\">\n <i class=\"tiny music icon\"></i>Beatmap\n </div>\n </div>\n <div class=\"ui attached segment\">\n <form class=\"ui form\" method=\"post\" action=\"/manage/portal.php\">\n <div class=\"ui fluid icon input\">\n <input type=\"text\" name=\"beatmap_md5\" value=\"\" placeholder=\"beatmap_md5\">\n <i class=\"search icon\"></i>\n </div>\n </form>\n </div>\n <div class=\"ui attached segment\">\n <form class=\"ui form\" method=\"post\"action=\"/manage/portal.php\">\n <div class=\"ui fluid icon input\">\n <input type=\"text\" name=\"beatmap_id\" value=\"\" placeholder=\"beatmap_id\">\n <i class=\"search icon\"></i>\n </div>\n </form>\n </div>\n </div>\n <div class=\"column\">\n <div class=\"ui green attached center aligned segment\">\n <div class=\"ui large header\">\n <i class=\"tiny user icon\"></i>User\n </div>\n </div>\n <div class=\"ui attached segment\">\n <form class=\"ui form\" method=\"post\" action=\"/manage/portal.php\">\n <div class=\"ui fluid icon input\">\n <input type=\"text\" name=\"user_id\" value=\"\" placeholder=\"user_id\">\n <i class=\"search icon\"></i>\n </div>\n </form>\n </div>\n <div class=\"ui attached segment\">\n <form class=\"ui form\" method=\"post\" action=\"/manage/portal.php\">\n <div class=\"ui fluid icon input\">\n <input type=\"text\" name=\"username\" value=\"\" placeholder=\"username\">\n <i class=\"search icon\"></i>\n </div>\n </form>\n </div>\n </div>\n </div>\n <?php 
if (isset($_POST['score_id'])): ?>\n <div class=\"ui red segment\">\n <div class=\"ui header\">Search Result</div>\n <?php execute_score_search($_POST['score_id']); ?>\n </div>\n <?php endif; ?>\n <?php if (isset($_POST['beatmap_md5'])): ?>\n <div class=\"ui blue segment\">\n <div class=\"ui header\">Search Result</div>\n <?php execute_beatmap_search_md5($_POST['beatmap_md5']); ?>\n </div>\n <?php endif; ?>\n <?php if (isset($_POST['beatmap_id'])): ?>\n <div class=\"ui blue segment\">\n <div class=\"ui header\">Search Result</div>\n <?php execute_beatmap_search_id($_POST['beatmap_id']); ?>\n </div>\n <?php endif; ?>\n <?php if (isset($_POST['user_id'])): ?>\n <div class=\"ui green segment\">\n <div class=\"ui header\">Search Result</div>\n <?php execute_user_search_id($_POST['user_id']); ?>\n </div>\n <?php endif; ?>\n <?php if (isset($_POST['username'])): ?>\n <div class=\"ui green segment\">\n <div class=\"ui header\">Search Result</div>\n <?php execute_user_search_name($_POST['username']); ?>\n </div>\n <?php endif; ?>\n </div>\n</html>\n" }, { "alpha_fraction": 0.6724137663841248, "alphanum_fraction": 0.6724137663841248, "avg_line_length": 10.600000381469727, "blob_id": "cff2df1b950406a1937427a2a5c6903b26df10d9", "content_id": "0ac3b804f6d6c7522b4ab4dc66183ddb09aabb5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 58, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/sql/w_users_silence_info_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n w_users_silence_info\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 0.6553672552108765, "alphanum_fraction": 0.6553672552108765, "avg_line_length": 10.800000190734863, "blob_id": "7205a8d7b9beaf43074ec18590c978e396467148", "content_id": "c5824c91997ba8c6cca1d116458420a92140ae1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 177, 
"license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/sql/m_beatmaps_S03.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n w_beatmaps work\nWHERE NOT EXISTS(\n SELECT\n 'X'\n FROM\n m_beatmaps master\n WHERE\n master.beatmap_id = work.beatmap_id\n)\nGROUP BY\n work.beatmap_id\n;\n" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 5.199999809265137, "blob_id": "4aed8e16d5b5bb332fd254fc1242c690a728ccbe", "content_id": "686c548b77f7b0c0c72f9cf5434c2296cb255e9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 31, "license_type": "no_license", "max_line_length": 12, "num_lines": 5, "path": "/sql/w_beatmaps_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n w_beatmaps\n;\n" }, { "alpha_fraction": 0.6113097071647644, "alphanum_fraction": 0.6262593269348145, "avg_line_length": 40.58108139038086, "blob_id": "fa3bbaaad7af9f230564d157fadd9ed8a2fa8cc5", "content_id": "a461b487defa6798caf452d28f36b34ea8e92718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3077, "license_type": "no_license", "max_line_length": 110, "num_lines": 74, "path": "/ors/main/users_scores_work.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.ripple_api import RippleApi\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import database\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_scores_work import UsersScoresWork\n UsersScoresWork().execute()\n\nclass UsersScoresWork(object):\n global log\n global database\n global connection\n log = logger.logger('users_scores_work')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n 
log.info('ORSI0001', 'UsersScoresWork')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n mode = 0 # In debugging always standard mode\n user_id = __user_id['user_id']\n users_scores = self.__get_users_scores(user_id, mode)\n users_scores = users_scores['scores']\n self.__set_users_scores_work(user_id, users_scores, mode)\n self.__set_beatmaps_work(users_scores, mode)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersScoresWork')\n except Exception as e:\n log.critical('ORSC0001', 'UsersScoresWork', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_S02')\n user_ids = result[1]\n return user_ids\n\n def __get_users_scores(self, user_id, mode):\n ripple_api = RippleApi()\n users_scores = ripple_api.get_users_scores_recent(user_id, mode)\n return users_scores\n\n def __set_users_scores_work(self, user_id, users_scores, mode):\n users_scores_temp = []\n for users_score in users_scores:\n users_score = converter.convert_users_score(user_id, users_score)\n users_scores_temp.append(users_score)\n users_scores = users_scores_temp\n result = database.execute_statement(connection, 'w_users_scores_D01', user_id, mode)\n log.debug('ORSD0001', 'w_users_scores', result[0], user_id)\n score_counter = 0\n for users_score in users_scores:\n result = database.execute_statement_values(connection, 'w_users_scores_I01', users_score.values())\n score_counter = score_counter + result[0]\n log.debug('ORSD0002', 'w_users_scores', score_counter, user_id)\n\n def __set_beatmaps_work(self, users_scores, mode):\n beatmaps = []\n for users_score in users_scores:\n beatmaps.append(users_score['beatmap'])\n result = database.execute_statement(connection, 'w_beatmaps_D01')\n log.debug('ORSD0005', 'w_users_scores', result[0])\n beatmap_counter = 0\n for beatmap in beatmaps:\n beatmap = converter.convert_beatmap(beatmap, mode)\n result = database.execute_statement_values(connection, 
'w_beatmaps_I01', beatmap.values())\n beatmap_counter = beatmap_counter + result[0]\n log.debug('ORSD0006', 'w_beatmaps', beatmap_counter)\n" }, { "alpha_fraction": 0.5389019250869751, "alphanum_fraction": 0.5547749400138855, "avg_line_length": 36.3106803894043, "blob_id": "bb65a2b8524114bb5a0f67d19f6155d271c3a5d9", "content_id": "13587b77e25bd912d9aa45d27a5baf4580f9b150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3843, "license_type": "no_license", "max_line_length": 122, "num_lines": 103, "path": "/ors/script/ripple_api.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "from ors.script import ripple_api\nfrom ors.script import util\nimport requests\nimport time\nimport sys\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n util.count_up_api_request()\n\nclass RippleApi(object):\n global log\n log = logger.logger('ripple_api')\n config = ''\n ripple_token = ''\n\n def __init__(self):\n self.config = util.read_config()\n self.ripple_token = self.config['token']['X-Ripple-Token']\n\n def get_ping(self):\n \"\"\"\n Check the Ripple is alive.\n \"\"\"\n api_url = self.config['url']['ping']\n try:\n api_response = requests.get(api_url)\n response = api_response.json()\n except Exception as e:\n sys.exit(1)\n # Wait a second to don't get a high load on Ripple API.\n ripple_api.count_up_api_request()\n return response\n\n def get_users_full(self, user_id):\n api_url = self.config['url']['users_full']\n api_parameters = {'X-Ripple-Token': self.ripple_token, 'id': user_id}\n response = self.get_api_response(api_url, api_parameters)\n return response\n\n def get_users_scores_recent(self, user_id, mode):\n api_url = self.config['url']['users_scores_recent']\n api_parameters = {'X-Ripple-Token': self.ripple_token, 'id': user_id, 'mode': mode, 'l': 100}\n response = self.get_api_response(api_url, api_parameters)\n return response\n\n def get_leaderboard(self, beatmap_md5, 
mode):\n api_url = self.config['url']['scores']\n api_parameters = {'X-Ripple-Token': self.ripple_token, 'md5': beatmap_md5, 'mode': mode, 'l': 50, 'sort': 'score'}\n response = self.get_api_response(api_url, api_parameters)\n return response\n\n def get_beatmap_info(self, beatmap_md5, mode):\n api_url = self.config['url']['get_beatmaps']\n api_parameters = {'limit': 1, 'h': beatmap_md5, 'm': mode}\n response = self.get_api_response_peppy(api_url, api_parameters)\n return response\n\n def get_api_response(self, api_url, api_parameters):\n retry_count = 2\n for i in range(3):\n try:\n api_response = requests.get(api_url, params=api_parameters)\n response = api_response.json()\n # Check Ripple API's return code.\n response_code = response['code']\n if response_code == 200:\n log.debug('ORSD0003', api_url, api_parameters)\n break;\n else:\n log.error('ORSE0002', response_code, retry_count, api_url, api_parameters)\n except Exception as e:\n log.error('ORSE0001', retry_count, e, api_url, api_parameters)\n if retry_count > 0:\n retry_count = retry_count - 1\n time.sleep(60)\n else:\n log.critical('ORSC0002', api_url, api_parameters)\n sys.exit(1)\n util.count_up_api_request()\n #time.sleep(0.5)\n return response\n\n def get_api_response_peppy(self, api_url, api_parameters):\n retry_count = 2\n for i in range(3):\n try:\n api_response = requests.get(api_url, params=api_parameters)\n response = api_response.json()\n # Check Ripple API's return code.\n log.debug('ORSD0003', api_url, api_parameters)\n break\n except Exception as e:\n log.error('ORSE0001', retry_count, e, api_url, api_parameters)\n if retry_count > 0:\n retry_count = retry_count - 1\n time.sleep(60)\n else:\n log.critical('ORSC0002', api_url, api_parameters)\n sys.exit(1)\n util.count_up_api_request()\n #time.sleep(0.5)\n return response\n" }, { "alpha_fraction": 0.6830986142158508, "alphanum_fraction": 0.6830986142158508, "avg_line_length": 16.75, "blob_id": "0c69726234805f12b5ce735ba169d1cdd455fc55", 
"content_id": "5c814185e2bda38a7a626dee3b5fa050ed5a8938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 142, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/sql/t_users_stats_monthly_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n COUNT(*) AS count\nFROM\n t_users_stats_monthly\nWHERE\n t_users_stats_monthly.month = '%s' AND\n t_users_stats_monthly.user_id = %s\n;\n" }, { "alpha_fraction": 0.6585366129875183, "alphanum_fraction": 0.6707317233085632, "avg_line_length": 10.714285850524902, "blob_id": "286c80dfaf307958730140f95ea4c3d424b401a7", "content_id": "390d8a20d152ba7a980ec9bce5cb10219645bfcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 82, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/sql/t_users_activity_S03.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n MAX(score) as score\nFROM\n t_users_activity\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.6682242751121521, "alphanum_fraction": 0.6682242751121521, "avg_line_length": 12.375, "blob_id": "f30e405b9955f80403ac05eba2fae1deb1b36713", "content_id": "79871cdb2226e54a129f238937219dee4ac85587", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 214, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/sql/t_users_scores_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n t_users_scores\nSELECT\n *\nFROM\n w_users_scores work\nWHERE\n work.user_id = %s\nAND NOT EXISTS(\n SELECT\n *\n FROM\n t_users_scores transaction\n WHERE\n transaction.score_id = work.score_id\n);\n" }, { "alpha_fraction": 0.49458298087120056, "alphanum_fraction": 0.501185953617096, "avg_line_length": 41.85439682006836, "blob_id": 
"f8915caf29959e19f3af50ee23d443d2cee48fea", "content_id": "f9b5a53d0c2c770a6450066c4b74ca2ad36425f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 15599, "license_type": "no_license", "max_line_length": 152, "num_lines": 364, "path": "/www/userpage.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php require_once('./functions.php') ?>\n<?php\n if (isset($_GET['u'])) { $user_id = $_GET['u']; }\n if (isset($_GET['m'])) { $mode_num = $_GET['m']; }\n $users_stats = get_users_stats($user_id, $mode_num);\n $username = $users_stats['username'];\n $global_leaderboard_rank = get_users_pp_rank_history($user_id, $mode_num);\n $playcount_chart = get_users_playcount_history($user_id, $mode_num);\n $replays_chart = get_users_replays_history($user_id, $mode_num);\n $ranked_score = $users_stats['ranked_score'];\n $accuracy = $users_stats['accuracy'];\n $playcount = $users_stats['playcount'];\n $total_score = $users_stats['total_score'];\n $level = $users_stats['level'];\n $total_hits = $users_stats['total_hits'];\n $max_combo = get_users_max_combo($user_id, $mode_num);\n $replays_watched = $users_stats['replays_watched'];\n $ranks_count = get_users_ranks_count($user_id, $mode_num);\n $registered_on = $users_stats['registered_on'];\n $registered_on_relative = get_datetime_diff($registered_on);\n $latest_activity = $users_stats['latest_activity'];\n $latest_activity_relative = get_datetime_diff($latest_activity);\n $play_style = $users_stats['play_style'];\n $country = $users_stats['country'];\n $count_ss = $ranks_count['ss'];\n $count_s = $ranks_count['s'];\n $count_a = $ranks_count['a'];\n?>\n<html>\n<head>\n <title><?php print($username); ?>'s Ripple Stats</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"semantic/dist/semantic.min.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\n <script\n src=\"https://code.jquery.com/jquery-3.1.1.min.js\"\n 
integrity=\"sha256-hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8=\"\n crossorigin=\"anonymous\"></script>\n <script src=\"semantic/dist/semantic.min.js\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.4.0/Chart.min.js\"></script>\n <script>\n $(function(){\n var key = '.ui.horizontal.two.column.grid.attached.segment.fp';\n var division = 10;\n var divlength = $('#first-place-rank-score '+key).length;\n dlsizePerResult = divlength / division;\n for(i = 1; i <= dlsizePerResult; i++) {\n $('#first-place-rank-score ' + key).eq(division * i - 1)\n .after('<div class=\"ui secondary attached segment right aligned more div'+i+'\"><button class=\"ui button active\">Show me more!</button>');\n }\n $('#first-place-rank-score ' + key + ', .ui.secondary.attached.segment.right.aligned.more').hide();\n for(j = 0; j < division; j++) {\n $('#first-place-rank-score ' + key).eq(j).show();\n }\n $('.ui.secondary.attached.segment.right.aligned.more.div1').show();\n $('.ui.secondary.attached.segment.right.aligned.more').click(function(){\n index = $(this).index('.ui.secondary.attached.segment.right.aligned.more');\n for(k = 0; k < (index + 2) * division; k++){\n $('#first-place-rank-score ' + key).eq(k).fadeIn();\n }\n $('.ui.secondary.attached.segment.right.aligned.more').hide();\n $('.ui.secondary.attached.segment.right.aligned.more').eq(index+1).show();\n });\n });\n $(function(){\n var key = '.ui.horizontal.two.column.grid.attached.segment.bp';\n var division = 10;\n var divlength = $('#best-performance-scores ' + key).length;\n dlsizePerResult = divlength / division;\n for(i = 1; i <= dlsizePerResult; i++) {\n $('#best-performance-scores ' + key).eq(division * i - 1)\n .after('<div class=\"ui secondary attached segment right aligned more div'+i+'\"><button class=\"ui button active\">Show me more!</button>');\n }\n $('#best-performance-scores ' + key + ', .ui.secondary.attached.segment.right.aligned.more').hide();\n for(j = 0; j < division; j++) {\n 
$('#best-performance-scores ' + key).eq(j).show();\n }\n $('.ui.secondary.attached.segment.right.aligned.more.div1').show();\n $('.ui.secondary.attached.segment.right.aligned.more').click(function(){\n index = $(this).index('.ui.secondary.attached.segment.right.aligned.more');\n for(k = 0; k < (index + 2) * division; k++){\n $('#best-performance-scores ' + key).eq(k).fadeIn();\n }\n $('.ui.secondary.attached.segment.right.aligned.more').hide();\n $('.ui.secondary.attached.segment.right.aligned.more').eq(index+1).show();\n });\n });\n </script>\n</head>\n<body>\n <div class=\"ui container\">\n <h1 class=\"ui header center aligned\"><?php print($username); ?>'s Ripple Stats</h1>\n <div class=\"ui two column grid\">\n <div class=\"four wide column segment\">\n <div class=\"ui segment top attached center aligned\">\n <img src=\"https://a.ripple.moe/<?php print($user_id);?>\" width=128px height=128px />\n </div>\n <div class=\"ui attached segment center aligned\">\n <h1><?php print($username); ?></h1>\n <i class=\"<?php print($country); ?> flag\"></i>\n <?php print_donor_badge($user_id); ?>\n </div>\n <div class=\"ui attached segment\">\n <p><i class=\"sign-in icon\"></i> <?php print($registered_on_relative); ?></p>\n <p><i class=\"sign-out icon\"></i> <?php print($latest_activity_relative); ?></p>\n </div>\n <div class=\"ui attached segment center aligned\">\n <?php\n $playstyle_array = get_playstyle_array($play_style);\n foreach ($playstyle_array as $playstyle) {\n if ($playstyle == 1) {\n print('<i class=\"big mouse pointer icon\"></i>');\n } else if ($playstyle == 2) {\n print('<i class=\"big tablet icon\"></i>');\n } else if ($playstyle == 4) {\n print('<i class=\"big keyboard icon\"></i>');\n } else if ($playstyle == 8) {\n print('<i class=\"big hand point up icon\"></i>');\n }\n }\n ?>\n </div>\n </div>\n <div class=\"twelve wide column segment\">\n <div class=\"ui four item menu\">\n <a class=\"item <?php if($mode_num == 0){ print('active'); } ?>\" 
href=\"/userpage.php?u=<?php print($user_id); ?>&m=0\">osu!</a>\n <a class=\"item <?php if($mode_num == 1){ print('active'); } ?>\" href=\"/userpage.php?u=<?php print($user_id); ?>&m=1\">Taiko</a>\n <a class=\"item <?php if($mode_num == 2){ print('active'); } ?>\" href=\"/userpage.php?u=<?php print($user_id); ?>&m=2\">CatchTheBeat</a>\n <a class=\"item <?php if($mode_num == 3){ print('active'); } ?>\" href=\"/userpage.php?u=<?php print($user_id); ?>&m=3\">osu!mania</a>\n </div>\n <div class=\"ui secondary inverted top attached segment\">\n <p>General</p>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>\n Performance: <?php print(number_format($users_stats['pp'])); ?>pp (#<?php print(number_format($users_stats['global_leaderboard_rank'])); ?>)\n <a href=\"https://ripple.moe/leaderboard?mode=0&p=1&country=<?php print(mb_strtolower($users_stats['country'])); ?>\">\n <i class=\"<?php print($country); ?> flag link\"></i>\n </a>\n #<?php print(number_format($users_stats['country_leaderboard_rank'])); ?>\n </p>\n </div>\n <div class=\"ui attached segment\">\n <canvas id=\"ppRankingChart\"></canvas>\n <script>\n var ctx = document.getElementById(\"ppRankingChart\").getContext('2d');\n var ppRankingChart = new Chart(ctx, {\n type: 'line',\n data: {\n labels: [<?php print_pp_chart_label($global_leaderboard_rank[0]); ?>],\n datasets: [\n {\n label: \"Performance Ranking\",\n borderColor: 'rgb(255, 128, 0)',\n lineTension: 0,\n fill: false,\n data: [<?php print_pp_chart_data($global_leaderboard_rank[1]); ?>],\n },\n ]\n },\n options: {\n responsive: true,\n legend: {\n display: false\n },\n scales: {\n xAxes: [{\n display: true,\n ticks: {\n callback: function(value) {return ((value % 30) == 0)? 
value + ' days ago' : ''},\n }\n }],\n yAxes: [{\n display: true,\n ticks: {\n reverse: true\n }\n }]\n },\n elements: {\n point: {\n radius: 0\n }\n }\n }\n });\n </script>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Recent Activity</p>\n </div>\n <div class=\"ui attached segment\">\n <div class=\"ui two column grid\">\n <?php print_users_activity($user_id, $username); ?>\n </div>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Detail Stats</p>\n </div>\n <div class=\"ui attached segment\">\n <p>Ranked Score: <?php print(number_format($ranked_score)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Hit Accuracy: <?php print(number_format($accuracy, 2).'%') ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Play Count: <?php print(number_format($playcount)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Total Score: <?php print(number_format($total_score)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Current Level: <?php print(number_format($level)) ?></p>\n <div class=\"ui indicating progress\">\n <div class=\"bar\">\n <div class=\"progress\" data-percent=\"74\" id=\"current-level\">74%</div>\n </div>\n </div>\n </div>\n <div class=\"ui attached segment\">\n <p>Total Hits: <?php print(number_format($total_hits)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Maximum Combo: <?php print(number_format($max_combo)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Replays Watched by Others: <?php print(number_format($replays_watched)) ?></p>\n </div>\n <div class=\"ui attached segment\">\n <p>Ranks</p>\n </div>\n <div class=\"ui horizontal eight column grid attached segment\">\n <div class=\"four wide column center middle aligned\"></div>\n <div class=\"one wide column center middle aligned\"><img src=\"/images/SS.png\" height=\"42\" /></div>\n <div class=\"one wide column center middle aligned\"><?php print($count_ss); ?></div>\n <div class=\"one wide column center middle 
aligned\"></div>\n <div class=\"one wide column center middle aligned\"><img src=\"/images/S.png\" height=\"42\" /></div>\n <div class=\"one wide column center middle aligned\"><?php print($count_s); ?></div>\n <div class=\"one wide column center middle aligned\"></div>\n <div class=\"one wide column center middle aligned\"><img src=\"/images/A.png\" height=\"42\" /></div>\n <div class=\"one wide column center middle aligned\"><?php print($count_a); ?></div>\n <div class=\"four wide column center middle aligned\"></div>\n </div>\n <div class=\"ui secondary inverted attached segment\">\n <p>Top Ranks</p>\n </div>\n <div class=\"ui attached segment\">\n <p>Best Performance</p>\n <div class=\"ui attached segment\" id=\"best-performance-scores\">\n <?php print_best_performance_scores($user_id, $mode_num); ?>\n </div>\n <p></p>\n <p>First Place Ranks</p>\n <div class=\"ui attached segments\" id=\"first-place-rank-score\">\n <?php print_first_place_ranks($user_id, $mode_num); ?>\n </div>\n </div>\n <div class=\"ui secondary inverted attached segment\">\n <p>Historical</p>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Play History</p>\n </div>\n <div class=\"ui attached segment\">\n <canvas id=\"PlaycountChart\" height=\"80px\"></canvas>\n <script>\n var ctx = document.getElementById(\"PlaycountChart\").getContext('2d');\n var PlaycountChart = new Chart(ctx, {\n type: 'line',\n data: {\n labels: [<?php print_playcount_chart_label($playcount_chart[0]); ?>],\n datasets: [\n {\n borderColor: 'rgb(128, 128, 255)',\n lineTension: 0,\n fill: true,\n data: [<?php print_playcount_chart_data($playcount_chart[1]); ?>],\n },\n ]\n },\n options: {\n responsive: true,\n legend: {\n display: false\n },\n scales: {\n yAxes: [{\n ticks: {\n beginAtZero: true,\n userCallback: function(label, index, labels) {\n if (Math.floor(label) === label) {\n return label;\n }\n }\n }\n }]\n }\n }\n });\n </script>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Most 
Passed Beatmaps</p>\n </div>\n <div class=\"ui attached segment\">\n <?php print_users_most_passed_beatmaps($user_id, $mode_num); ?>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Recent Plays</p>\n </div>\n <div class=\"ui attached segment\">\n <?php print_users_recent_plays($user_id, $mode_num); ?>\n </div>\n <div class=\"ui secondary attached segment\">\n <p>Replays Watched History</p>\n </div>\n <div class=\"ui attached segment\">\n <canvas id=\"ReplaysChart\" height=\"80px\"></canvas>\n <script>\n var ctx = document.getElementById(\"ReplaysChart\").getContext('2d');\n var ReplaysChart = new Chart(ctx, {\n type: 'line',\n data: {\n labels: [<?php print_replays_chart_label($replays_chart[0]); ?>],\n datasets: [\n {\n label: \"Chart-1\",\n borderColor: 'rgb(255, 128, 0)',\n lineTension: 0,\n fill: true,\n data: [<?php print_replays_chart_label($replays_chart[1]); ?>],\n },\n ]\n },\n options: {\n responsive: true,\n legend: {\n display: false\n },\n scales: {\n yAxes: [{\n ticks: {\n beginAtZero: true,\n userCallback: function(label, index, labels) {\n if (Math.floor(label) === label) {\n return label;\n }\n }\n }\n }]\n }\n }\n });\n </script>\n </div>\n <div class=\"ui secondary inverted attached segment\">\n <p>Achievements</p>\n </div>\n <div class=\"ui bottom attached segment\">\n </div>\n </div>\n </div>\n </div>\n</body>\n</html>\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 9.399999618530273, "blob_id": "b0c63f4051d29e82996febf6cdb10b1b923212e0", "content_id": "e3eb1e4524fc78d6ab0bbbbff4c081f8921f0453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 52, "license_type": "no_license", "max_line_length": 16, "num_lines": 5, "path": "/sql/w_users_badges_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n w_users_badges\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 
0.6657223701477051, "alphanum_fraction": 0.6657223701477051, "avg_line_length": 13.708333015441895, "blob_id": "d96f1a89738ac23efb097bd7fd36558ebd64c8e8", "content_id": "5ab394301291f31a30c3ce9f29d7bfd1c7644ff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 353, "license_type": "no_license", "max_line_length": 46, "num_lines": 24, "path": "/sql/t_users_scores_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_scores transaction\nWHERE NOT EXISTS(\n SELECT\n *\n FROM\n m_users_scores master\n WHERE\n master.score_id = transaction.score_id AND\n transaction.user_id = %s\n) AND NOT EXISTS (\n SELECT\n *\n FROM\n l_scores_on_master list\n WHERE\n list.score_id = transaction.score_id\n)\nORDER BY\n transaction.score_id\nASC\n;\n" }, { "alpha_fraction": 0.42611682415008545, "alphanum_fraction": 0.4295532703399658, "avg_line_length": 8.699999809265137, "blob_id": "d1816519fbf966dc0e0eefacc87e1ba97b34afff", "content_id": "5e0e9afa590a7950ddcde71c2aebfe92f1029625", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 291, "license_type": "no_license", "max_line_length": 19, "num_lines": 30, "path": "/sql/t_users_activity_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n t_users_activity(\n user_id,\n score_id,\n score,\n beatmap_id,\n beatmap_md5,\n song_name,\n ranking,\n type,\n mode,\n rank,\n archived_on,\n created_on\n )\nVALUES (\n %s,\n %s,\n %s,\n %s,\n '%s',\n '%s',\n %s,\n %s,\n %s,\n '%s',\n '%s',\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.6648530960083008, "alphanum_fraction": 0.6779107451438904, "avg_line_length": 35.039215087890625, "blob_id": "2cdea2f44d28056117f67fdd082d27970b10d91f", "content_id": "d7309686ce3bae50f46dbbb7fd697e8de82e1e75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
1838, "license_type": "no_license", "max_line_length": 103, "num_lines": 51, "path": "/ors/script/util.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import configparser\nfrom datetime import datetime\nfrom ors.script import converter\nimport pytz\n\ndef read_config():\n config_file_path = 'conf/ors.conf'\n config_object = configparser.ConfigParser()\n config_object.read(config_file_path, 'utf-8')\n return config_object\n\ndef datetime_now():\n return datetime.now(pytz.timezone('UTC'))\n\ndef datetime_now_str():\n return datetime.now(pytz.timezone('UTC')).strftime('%Y-%m-%d %H:%M:%S')\n\ndef get_beatmap_info(beatmap_md5, mode):\n from ors.script.database import Database\n database = Database()\n connection = database.get_connection()\n result = database.execute_statement(connection, 'm_beatmaps_S01', beatmap_md5)\n count = result[0]\n if count == 0:\n from ors.script.ripple_api import RippleApi\n ripple_api = RippleApi()\n beatmap_info = ripple_api.get_beatmap_info(beatmap_md5, mode)\n beatmap_info = converter.convert_beatmap_peppy(beatmap_info[0])\n result = database.execute_statement_values(connection, 'm_beatmaps_I01', beatmap_info.values())\n connection.commit()\n connection.close()\n return beatmap_info\n else:\n return result[1][0]\n\ndef count_up_api_request():\n from ors.script.database import Database\n from ors.script import util\n database = Database()\n connection = database.get_connection()\n now = util.datetime_now_str()\n result = database.execute_statement(connection, 's_api_request_count_tick_S01', now)\n is_exists = result[0]\n if is_exists == 0:\n result = database.execute_statement(connection, 's_api_request_count_tick_I01', now)\n else:\n count = result[1][0]['count']\n count = count + 1\n result = database.execute_statement(connection, 's_api_request_count_tick_U01', count, now)\n connection.commit()\n connection.close()\n" }, { "alpha_fraction": 0.621004581451416, "alphanum_fraction": 0.621004581451416, 
"avg_line_length": 15.84615421295166, "blob_id": "389abb779f86ade0b9a190fbb72f02bdc0d4ab9c", "content_id": "cb1e8d291b75f0cdf0235903f5842e6550fbdff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 219, "license_type": "no_license", "max_line_length": 51, "num_lines": 13, "path": "/sql/t_users_stats_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n date_format(created_on, '%%Y-%%m') AS created_on,\n COUNT(*) AS count\nFROM\n t_users_stats\nWHERE\n user_id = %s\nGROUP BY\n date_format(created_on, '%%Y-%%m')\nORDER BY\n date_format(created_on, '%%Y-%%m')\nASC\n;\n" }, { "alpha_fraction": 0.6075630187988281, "alphanum_fraction": 0.6260504126548767, "avg_line_length": 42.272727966308594, "blob_id": "cc5f9a46d5f52a25c3aefe1b540cab7a07059b3a", "content_id": "d7d274928df0522993af245b47c88f6ee4eb9f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2380, "license_type": "no_license", "max_line_length": 141, "num_lines": 55, "path": "/ors/main/beatmap_master.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.beatmap_master import BeatmapMaster\n BeatmapMaster().execute()\n\nclass BeatmapMaster(object):\n global log\n global database\n global connection\n log = logger.logger('beatmap_master')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'BeatmapMaster')\n self.__set_beatmap_master()\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'BeatmapMaster')\n except Exception as e:\n log.critical('ORSC0001', 'BeatmapMaster', e)\n raise Exception(e)\n\n def __set_beatmap_master(self):\n # Insert new beatmaps from work to master.\n result = 
database.execute_statement(connection, 'm_beatmaps_S03')\n count = result[0]\n new_beatmaps = result[1]\n log.info('ORSI0008', count)\n for new_beatmap in new_beatmaps:\n del new_beatmap['updated_on']\n song_name = new_beatmap['song_name']\n new_beatmap['song_name'] = new_beatmap['song_name'].replace('\\'', '\\\\\\'')\n result = database.execute_statement_values(connection, 'm_beatmaps_I01', new_beatmap.values())\n log.debug('ORSD0008', song_name, new_beatmap['beatmap_id'], new_beatmap['beatmap_md5'])\n # Insert updated beatmaps from work to master\n result = database.execute_statement(connection, 'm_beatmaps_S04')\n count = result[0]\n updated_beatmaps = result[1]\n log.info('ORSI0009', count)\n for updated_beatmap in updated_beatmaps:\n del updated_beatmap['created_on']\n del updated_beatmap['updated_on']\n song_name = updated_beatmap['song_name']\n updated_beatmap.update(beatmap_md5_key=updated_beatmap['beatmap_md5'])\n song_name = updated_beatmap['song_name']\n updated_beatmap['song_name'] = updated_beatmap['song_name'].replace('\\'', '\\\\\\'')\n result = database.execute_statement_values(connection, 'm_beatmaps_U01', updated_beatmap.values())\n log.debug('ORSD0009', song_name, updated_beatmap['beatmap_id'], updated_beatmap['beatmap_md5'], updated_beatmap['latest_update'])\n" }, { "alpha_fraction": 0.5755693316459656, "alphanum_fraction": 0.5954106450080872, "avg_line_length": 54.20000076293945, "blob_id": "c65901e9693a636816a510238859cde486204d40", "content_id": "59638ec2d3ce3acd5405ec7feb448f2c4ee73216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5796, "license_type": "no_license", "max_line_length": 146, "num_lines": 105, "path": "/ors/main/first_place_master.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script.ripple_api import RippleApi\nfrom ors.script import converter\nfrom ors.script 
import logger\n\nif __name__ == \"__main__\":\n from ors.main.first_place_master import FirstPlaceMaster\n FirstPlaceMaster().execute()\n\nclass FirstPlaceMaster(object):\n global log\n global database\n global connection\n log = logger.logger('first_place_master')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'FirstPlaceMaster')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n self.__check_first_place(user_id)\n self.__set_first_place(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'FirstPlaceMaster')\n except Exception as e:\n log.critical('ORSC0001', 'FirstPlaceMaster', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __check_first_place(self, user_id):\n ripple_api = RippleApi()\n result = database.execute_statement(connection, 'm_first_place_S02', user_id)\n log.info('ORSI0011', result[0], user_id)\n first_place_scores = result[1]\n not_changed_counter = 0\n lost_counter = 0\n for first_place_score in first_place_scores:\n beatmap_md5 = first_place_score['beatmap_md5']\n mode = first_place_score['play_mode']\n leaderboard_scores = ripple_api.get_leaderboard(beatmap_md5, mode)\n first_place_now_score_id = leaderboard_scores['scores'][0]['id']\n # NEED TEST HERE.\n if first_place_now_score_id != first_place_score['score_id']:\n # The case lost first place.\n result = database.execute_statement(connection, 'm_beatmaps_S02', beatmap_md5)\n beatmap_id = result[1][0]['beatmap_id']\n song_name = result[1][0]['song_name']\n del first_place_score['time']\n first_place_score['time'] = leaderboard_scores['scores'][0]['time']\n first_place_score = converter.convert_first_place_score(first_place_score)\n activity = converter.convert_activity(first_place_score, beatmap_id, song_name, -1)\n result 
= database.execute_statement_values(connection, 't_users_activity_I01', activity.values())\n result = database.execute_statement(connection, 'm_first_place_D01', first_place_score['score_id'])\n log.debug('ORSD0017', user_id, first_place_score['score_id'], song_name, first_place_score['score'], first_place_score['rank'])\n lost_counter = lost_counter + 1\n else:\n result = database.execute_statement(connection, 'm_beatmaps_S02', beatmap_md5)\n song_name = result[1][0]['song_name']\n log.debug('ORSD0018', user_id, first_place_score['score_id'], song_name, first_place_score['score'], first_place_score['rank'])\n not_changed_counter = not_changed_counter + 1\n log.info('ORSI0013', user_id, not_changed_counter, lost_counter)\n\n def __set_first_place(self, user_id):\n result = database.execute_statement(connection, 't_users_activity_S04', user_id)\n activity_score_infos = result[1]\n for activity_score_info in activity_score_infos:\n activity_score_id = activity_score_info['score_id']\n activity_beatmap_md5 = activity_score_info['beatmap_md5']\n # Check updated score.\n result = database.execute_statement(connection, 'm_first_place_S01', activity_beatmap_md5)\n count = result[0]\n if count == 0:\n result = database.execute_statement(connection, 't_users_scores_S02', activity_score_id)\n score = result[1][0]\n del score['updated_on']\n result = database.execute_statement_values(connection, 'm_first_place_I01', score.values())\n result = database.execute_statement(connection, 'l_scores_on_first_place_I01', user_id, score['score_id'], 1, score['created_on'])\n result = database.execute_statement(connection, 'm_beatmaps_S01', score['beatmap_md5'])\n song_name = result[1][0]['song_name']\n log.debug('ORSD0015', user_id, activity_score_id, song_name, score['score'], score['rank'])\n else:\n if activity_score_id != result[1][0]['score_id']:\n result = database.execute_statement(connection, 't_users_scores_S02', activity_score_id)\n score = result[1][0]\n created_on = 
score['created_on']\n del score['created_on']\n del score['updated_on']\n score.update(beatmap_md5_key=score['beatmap_md5'])\n result = database.execute_statement_values(connection, 'm_first_place_U01', score.values())\n result = database.execute_statement(connection, 'l_scores_on_first_place_I01', user_id, score['score_id'], 2, created_on)\n result = database.execute_statement(connection, 'm_beatmaps_S01', score['beatmap_md5'])\n song_name = result[1][0]['song_name']\n log.debug('ORSD0016', user_id, activity_score_id, song_name, score['score'], score['rank'])\n else:\n result = database.execute_statement(connection, 'l_scores_on_first_place_I01', user_id, score['score_id'], 3, created_on)\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 9.199999809265137, "blob_id": "b6f790cd15175b023df5d3ab6f6d1b5c0ff8f6f9", "content_id": "dd9631b4526399358f5a3746c6bdfeac3c5436d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 51, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/sql/w_users_stats_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n w_users_stats\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 0.5424200296401978, "alphanum_fraction": 0.5424200296401978, "avg_line_length": 12.566038131713867, "blob_id": "202236b71d343d95f496a715dd181aef5a20f0e9", "content_id": "c07f2c3192177112ba152614e975df1482aad065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 35, "num_lines": 106, "path": "/sql/w_users_stats_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n w_users_stats(\n user_id,\n username,\n username_aka,\n registered_on,\n privileges,\n latest_activity,\n country,\n ranked_score_std,\n total_score_std,\n playcount_std,\n 
replays_watched_std,\n total_hits_std,\n level_std,\n accuracy_std,\n pp_std,\n global_leaderboard_rank_std,\n country_leaderboard_rank_std,\n ranked_score_taiko,\n total_score_taiko,\n playcount_taiko,\n replays_watched_taiko,\n total_hits_taiko,\n level_taiko,\n accuracy_taiko,\n pp_taiko,\n global_leaderboard_rank_taiko,\n country_leaderboard_rank_taiko,\n ranked_score_ctb,\n total_score_ctb,\n playcount_ctb,\n replays_watched_ctb,\n total_hits_ctb,\n level_ctb,\n accuracy_ctb,\n pp_ctb,\n global_leaderboard_rank_ctb,\n country_leaderboard_rank_ctb,\n ranked_score_mania,\n total_score_mania,\n playcount_mania,\n replays_watched_mania,\n total_hits_mania,\n level_mania,\n accuracy_mania,\n pp_mania,\n global_leaderboard_rank_mania,\n country_leaderboard_rank_mania,\n play_style,\n favourite_mode,\n created_on\n )\nVALUES (\n %s,\n '%s',\n '%s',\n '%s',\n %s,\n '%s',\n '%s',\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.4079822599887848, "alphanum_fraction": 0.42793792486190796, "avg_line_length": 8.80434799194336, "blob_id": "2ee7d668e5fd1d9d8ccc3d040c27cf569cc9ed72", "content_id": "531b5c32202ef2784b6afbf5990bfe3040048940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 451, "license_type": "no_license", "max_line_length": 18, "num_lines": 46, "path": "/sql/w_users_scores_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n w_users_scores(\n user_id,\n score_id,\n beatmap_md5,\n max_combo,\n score,\n is_full_combo,\n mods,\n count_300,\n count_100,\n count_50,\n count_geki,\n count_katu,\n count_miss,\n time,\n play_mode,\n accuracy,\n pp,\n rank,\n completed,\n created_on\n )\nVALUES(\n %s,\n %s,\n '%s',\n %s,\n %s,\n 
%s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n '%s',\n %s,\n %s,\n %s,\n '%s',\n %s,\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.6125146746635437, "alphanum_fraction": 0.6298472285270691, "avg_line_length": 44.38666534423828, "blob_id": "049ecb4b1350c61d2832dfa0cb1ce5dc4ee31dd2", "content_id": "f1f9928a111bf04e238baf65204d2ac42e26910d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3404, "license_type": "no_license", "max_line_length": 119, "num_lines": 75, "path": "/ors/main/users_stats_work.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.ripple_api import RippleApi\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_stats_work import UsersStatsWork\n UsersStatsWork().execute()\n\nclass UsersStatsWork(object):\n global log\n global database\n global connection\n log = logger.logger('users_stats_work')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersStatsWork')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n users_stats = self.__get_users_stats(user_id)\n self.__set_users_stats_work(users_stats)\n self.__set_users_badge_work(user_id, users_stats)\n self.__set_users_silence_info_work(user_id, users_stats)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersStatsWork')\n except Exception as e:\n log.critical('ORSC0001', 'UsersStatsWork', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __get_users_stats(self, user_id):\n ripple_api = RippleApi()\n users_stats = ripple_api.get_users_full(user_id)\n return users_stats\n\n def __set_users_stats_work(self, 
users_stats):\n users_stats = converter.convert_users_stats(users_stats)\n user_id = users_stats['user_id']\n result = database.execute_statement(connection, 'w_users_stats_D01', user_id)\n log.debug('ORSD0001', 'w_users_stats', result[0], user_id)\n result = database.execute_statement_values(connection, 'w_users_stats_I01', users_stats.values())\n log.debug('ORSD0002', 'w_users_stats', result[0], user_id)\n\n def __set_users_badge_work(self, user_id, users_stats):\n result = database.execute_statement(connection, 'w_users_badges_D01', user_id)\n log.debug('ORSD0001', 'w_users_badges', result[0], user_id)\n users_badges = users_stats['badges']\n users_custom_badge = users_stats['custom_badge']\n if users_badges != None:\n if users_custom_badge != None:\n users_custom_badge['id'] = 0\n users_badges.append(users_custom_badge)\n for users_badge in users_badges:\n users_badge = converter.convert_users_badge(user_id, users_badge)\n result = database.execute_statement_values(connection, 'w_users_badges_I01', users_badge.values())\n log.debug('ORSD0002', 'w_users_badges', result[0], user_id)\n\n def __set_users_silence_info_work(self, user_id, users_stats):\n users_silence_info = users_stats['silence_info']\n result = database.execute_statement(connection, 'w_users_silence_info_D01', user_id)\n log.debug('ORSD0001', 'w_users_silence_info', result[0], user_id)\n users_silence_info = converter.convert_users_silence_info(user_id, users_silence_info)\n result = database.execute_statement_values(connection, 'w_users_silence_info_I01', users_silence_info.values())\n log.debug('ORSD0002', 'w_users_silenfe_info', result[0], user_id)\n" }, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.6351351141929626, "avg_line_length": 9.571428298950195, "blob_id": "f6fab7e7ec6e0833f62d54347e769366ced97523", "content_id": "9c4cd4210145609e5a0c003d2318b387f9373c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 74, 
"license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/sql/t_users_scores_U02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n t_users_scores\nSET\n is_on_activity = %s\nWHERE\n score_id = %s\n;\n" }, { "alpha_fraction": 0.669767439365387, "alphanum_fraction": 0.669767439365387, "avg_line_length": 12.4375, "blob_id": "f580fd6300a3350940550e0871c4da51f9e9b482", "content_id": "195af3d0628889716fc674d4a7caf83ac611ce93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 215, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/sql/t_users_stats_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n t_users_stats\nSELECT\n *\nFROM\n w_users_stats work\nWHERE\n work.user_id = %s\nAND NOT EXISTS(\n SELECT\n *\n FROM\n t_users_stats transaction\n WHERE\n transaction.created_on = work.created_on\n);\n" }, { "alpha_fraction": 0.518796980381012, "alphanum_fraction": 0.518796980381012, "avg_line_length": 11.090909004211426, "blob_id": "dc8dab60eb80bfe3e7571d0fd8d48a5d604e7e4e", "content_id": "ea42e75c1893456d3bbfcf05fa7ef6aa60cfc585", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 133, "license_type": "no_license", "max_line_length": 18, "num_lines": 11, "path": "/sql/m_users_badges_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n m_users_badges\nSET\n user_id = %s,\n badge_id = %s,\n name = '%s',\n icon = '%s'\nWHERE\n user_id = %s AND\n badge_id = %s\n;\n" }, { "alpha_fraction": 0.6376811861991882, "alphanum_fraction": 0.6376811861991882, "avg_line_length": 8.857142448425293, "blob_id": "e436566b1fd5d6ef572261004f6fe06fcca0deec", "content_id": "693991e114ecaad50d11f2325fd262203d6de2e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 69, 
"license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/sql/m_users_badges_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n COUNT(*) count\nFROM\n m_users_badges\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10, "blob_id": "0d562e07418ad8b8dc204a25c70ced33916ccf9e", "content_id": "06e6391f8405f47ab25e31d85a5118a5219f60f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 77, "license_type": "no_license", "max_line_length": 26, "num_lines": 7, "path": "/sql/s_api_request_count_tick_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n s_api_request_count_tick\nSET\n count = %s\nWHERE\n datetime = '%s'\n;\n" }, { "alpha_fraction": 0.5914068818092346, "alphanum_fraction": 0.5977253317832947, "avg_line_length": 39.23728942871094, "blob_id": "0dc156ce7f7ae5851ff4d6ceb539726426de3e83", "content_id": "0a47b17329e34ebdea249a2d27f21b229a08235f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 374, "num_lines": 59, "path": "/www/first_place.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n function printFirstPlaceStats($score_id, $beatmap_id, $rank, $song_name, $mods, $accuracy, $time, $pp) {\n print('<tr>');\n print('<td><img src=\"/images/'.$rank.'.png\" width=\"12\" height=\"15\" style=\"padding-right: 8px\"><a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.' 
('.sprintf('%0.2f', $accuracy).'%)</a></td>');\n print('<td style=\"text-align: right\">'.sprintf('%d', $pp).'pp</td>');\n print('</tr>');\n print('<tr>');\n print('<td>'.$time.'</td>');\n print('<td style=\"text-align: right\"><a href=\"https://ripple.moe/web/replays/'.$score_id.'\">โ˜…</a></td>');\n print('</tr>');\n }\n\n function decToBits($dec) {\n $bin = decbin($dec);\n $bits = str_split($bin);\n $bits = array_reverse($bits);\n $bits = array_filter($bits);\n\n foreach ( $bits as $pos => $bit ) {\n $bits[$pos] = pow(2, $pos);\n }\n\n $bits = array_values($bits);\n\n return $bits;\n }\n print(var_dump(decToBits(17456)));\n\n $database_config = parse_ini_file('../conf/database.conf');\n $db_dbname = $database_config['dbname'];\n $db_host = $database_config['host'];\n $db_port = $database_config['port'];\n $db_charset = $database_config['charset'];\n $db_user = $database_config['user'];\n $db_password = $database_config['password'];\n\n # Connect to database.\n $dsn = 'mysql:dbname='.$db_dbname.'; host='.$db_host.'; port='.$db_port.'; charset='.$db_charset;\n $pdo = new PDO($dsn, $db_user, $db_password);\n\n $query = 'SELECT m_first_place.score_id AS score_id, m_first_place.mods AS mods, m_first_place.rank AS rank, m_first_place.time AS time, m_first_place.accuracy AS accuracy, m_first_place.pp AS pp, m_beatmaps.song_name, m_beatmaps.beatmap_id as beatmap_id FROM m_first_place INNER JOIN m_beatmaps ON m_first_place.beatmap_md5 = m_beatmaps.beatmap_md5 ORDER BY time DESC;';\n\n $statement = $pdo -> prepare($query);\n $statement -> execute();\n\n print('<table class=\"ui table score-table orange\">');\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $rank = $row['rank'];\n $song_name_full = $row['song_name'];\n $mods = $row['mods'];\n $accuracy = $row['accuracy'];\n $time = $row['time'];\n $pp = $row['pp'];\n $score_id = $row['score_id'];\n $beatmap_id = $row['beatmap_id'];\n printFirstPlaceStats($score_id, $beatmap_id, $rank, $song_name_full, $mods, 
$accuracy, $time, $pp);\n }\n print('</table>');\n?>\n" }, { "alpha_fraction": 0.4656488597393036, "alphanum_fraction": 0.4656488597393036, "avg_line_length": 8.357142448425293, "blob_id": "61a590e5027e11c7d9dd3f60e3df416ba47e2bf6", "content_id": "0e4a1b0d126d78499bb5943680c93e43caf24ef8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 131, "license_type": "no_license", "max_line_length": 23, "num_lines": 14, "path": "/sql/w_users_silence_info_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n w_users_silence_info(\n user_id,\n reason,\n end,\n created_on\n )\nVALUES (\n %s,\n '%s',\n '%s',\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.554347813129425, "avg_line_length": 9.222222328186035, "blob_id": "ca88729cd5ea54427c4fc52dc8db25c2138c1801", "content_id": "b0a5ebe3c18f0386d1c1cbe213679f6d5836b970", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 92, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/sql/s_api_request_count_tick_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n s_api_request_count_tick(\n datetime,\n count\n )\nVALUES (\n '%s',\n 1\n);\n" }, { "alpha_fraction": 0.47999998927116394, "alphanum_fraction": 0.47999998927116394, "avg_line_length": 8.090909004211426, "blob_id": "d6896e1c7af589fb2ae15e858657d60b9dcf1b76", "content_id": "0a3af5507827f147c3216fd4020a4790f8224520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 100, "license_type": "no_license", "max_line_length": 14, "num_lines": 11, "path": "/sql/m_users_002.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n m_users(\n user_id,\n is_enable,\n created_on\n )\nVALUES(\n %s,\n %s,\n '%s'\n);\n" }, { "alpha_fraction": 
0.6197183132171631, "alphanum_fraction": 0.6338028311729431, "avg_line_length": 9.142857551574707, "blob_id": "f7ebee1f87a8422c4d2d69cd265b1c3b869fb16b", "content_id": "738267b93282b82539d92411729be4521085e61a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 71, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/sql/t_users_scores_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n t_users_scores\nSET\n is_on_master = 1\nWHERE\n score_id = %s\n;\n" }, { "alpha_fraction": 0.6208178400993347, "alphanum_fraction": 0.6208178400993347, "avg_line_length": 15.8125, "blob_id": "f9e2d8f6d7c450310265ce9fd8d02ef478fa7447", "content_id": "25b084a74f0ef9d016da0785891afc668c819637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 269, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/sql/m_beatmaps_S04.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n w_beatmaps work\nWHERE\n work.beatmap_id = (\n SELECT\n beatmap_id\n FROM\n m_beatmaps master\n WHERE\n master.beatmap_id = work.beatmap_id AND\n master.latest_update < work.latest_update\n GROUP BY\n master.beatmap_id\n );\n" }, { "alpha_fraction": 0.5412843823432922, "alphanum_fraction": 0.5412843823432922, "avg_line_length": 11.823529243469238, "blob_id": "94d7b33b4688f708e98bb197db75e7474f845d86", "content_id": "a73419a472a47e1b41d0abacd75bb636e9e2ca67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 218, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/sql/t_users_stats_monthly_S03.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_stats\nWHERE\n t_users_stats.created_on = (\n SELECT\n MIN(created_on)\n FROM\n t_users_stats\n 
WHERE\n created_on like '%s%%' AND\n user_id = %s\n )\nAND\n user_id = %s\n;\n" }, { "alpha_fraction": 0.5574154853820801, "alphanum_fraction": 0.5651155114173889, "avg_line_length": 35.42683029174805, "blob_id": "d3dd3e21cecce1c531bf8f7ecc596ab4ddb24237", "content_id": "b1cdff6ef5249e3cdd91e296ec10e1c0f1e494e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2987, "license_type": "no_license", "max_line_length": 115, "num_lines": 82, "path": "/ors/script/database.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import re\nimport sys\nimport pymysql\nfrom datetime import datetime\nfrom ors.script import util\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.script import Database\n Database().execute()\n\nclass Database(object):\n global log\n log = logger.logger('database')\n\n def get_connection(self):\n config = util.read_config()\n host = config['database']['host']\n db = config['database']['db']\n user = config['database']['user']\n password = config['database']['password']\n charset = config['database']['charset']\n try:\n connection = pymysql.connect(\n host=host,\n db=db,\n user=user,\n password=password,\n charset=charset,\n cursorclass=pymysql.cursors.DictCursor\n )\n except Exception as e:\n log.critical('ORSC0003', e)\n sys.exit(1)\n return connection\n\n def execute_statement(self, connection, sql_name, *parameters):\n sql_path = 'sql/' + sql_name + '.sql'\n sql_file = open(sql_path, 'r')\n sql = sql_file.read().format()\n statement = sql % parameters\n try:\n cursor = connection.cursor()\n start_time = datetime.now()\n count = cursor.execute(statement)\n end_time = datetime.now()\n process_time = end_time - start_time\n process_time = \"{0:.6f}\".format(process_time.total_seconds())\n log.info('ORSI0014', sql_name, process_time)\n result = cursor.fetchall()\n cursor.close()\n except Exception as e:\n # When failed to commit statement, 
rollback table and system abnormal end.\n connection.rollback()\n log.critical('ORSC0004', 'execute_statement', e, sql_name, self.__compress_statement(statement))\n sys.exit(1)\n return [count, result]\n\n def execute_statement_values(self, connection, sql_name, values):\n sql_path = 'sql/' + sql_name + '.sql'\n sql_file = open(sql_path, 'r')\n sql = sql_file.read().format()\n statement = sql % tuple(values)\n try:\n cursor = connection.cursor()\n start_time = datetime.now()\n count = cursor.execute(statement)\n end_time = datetime.now()\n process_time = end_time - start_time\n process_time = \"{0:.6f}\".format(process_time.total_seconds())\n result = cursor.fetchall()\n cursor.close()\n except Exception as e:\n # When failed to commit statement, rollback table and system abnormal end.\n connection.rollback()\n log.critical('ORSC0004', 'execute_statement_values', e, sql_name, self.__compress_statement(statement))\n sys.exit(1)\n return [count, result]\n\n def __compress_statement(self, statement):\n statement = re.sub(r\"\\s+\", \" \", statement)\n return statement\n" }, { "alpha_fraction": 0.6323529481887817, "alphanum_fraction": 0.6323529481887817, "avg_line_length": 8.714285850524902, "blob_id": "d9034015d8aa7817f6ab7abf878cdf2b1f68fd16", "content_id": "e06fb6ce7991483b8373da50ca49d3f2b059ff2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 68, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/sql/m_users_stats_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n COUNT(*) count\nFROM\n m_users_stats\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 0.5481927990913391, "alphanum_fraction": 0.5481927990913391, "avg_line_length": 10.857142448425293, "blob_id": "5442d0fd1c516a5d2f2ae6c4b6ccee35facb3b14", "content_id": "2244e2465ba2dd91b0628dff7e7e3c6d59b889da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"SQL", "length_bytes": 166, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/sql/t_users_stats_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_stats\nWHERE\n created_on = (\n SELECT\n MAX(created_on)\n FROM\n t_users_stats\n WHERE\n t_users_stats.user_id = %s\n )\n;\n" }, { "alpha_fraction": 0.5802469253540039, "alphanum_fraction": 0.5925925970077515, "avg_line_length": 9.125, "blob_id": "f4d12e08c4e92fa427557ee33dc1cdf4ec5d751f", "content_id": "17fc5e535a665c8fc9ff83e91d45ff2fc4a1ae69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 81, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/sql/m_users_scores_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n m_users_scores\nWHERE\n user_id = %s AND\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.5391174554824829, "alphanum_fraction": 0.5418887138366699, "avg_line_length": 31.35172462463379, "blob_id": "afa8a5e8e23c89657febf1b40b1ea02b1500b7e2", "content_id": "96b1ef3503aa3a5c0f029aaeb57043f74d88c297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4691, "license_type": "no_license", "max_line_length": 148, "num_lines": 145, "path": "/www/manage/functions.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n function require_unlogined_session() {\n @session_start();\n if (isset($_SESSION['username'])) {\n header('Location: /manage/portal.php');\n exit;\n }\n }\n\n function require_logined_session() {\n @session_start();\n if (! 
isset($_SESSION['username'])) {\n header('Location: /manage/login.php');\n exit;\n }\n }\n\n function generate_token() {\n return hash('sha256', session_id());\n }\n\n function validate_token($token) {\n return $token === generate_token();\n }\n\n function h($str) {\n return htmlspecialchars($str, ENT_QUOTES, 'UTF-8');\n }\n\n function get_pdo(){\n $database_config = parse_ini_file('../../conf/database.conf');\n $db_dbname = $database_config['dbname'];\n $db_host = $database_config['host'];\n $db_port = $database_config['port'];\n $db_charset = $database_config['charset'];\n $db_user = $database_config['user'];\n $db_password = $database_config['password'];\n # Connect to database.\n $dsn = 'mysql:dbname='.$db_dbname.'; host='.$db_host.'; port='.$db_port.'; charset='.$db_charset;\n $pdo = new PDO($dsn, $db_user, $db_password);\n return $pdo;\n }\n\n function execute_score_search($score_id) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM t_users_scores INNER JOIN m_beatmaps ON t_users_scores.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE score_id = :score_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':score_id' => $score_id]);\n $score_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($score_data) {\n $user_id = $score_data['user_id'];\n $score_id = $score_data['score_id'];\n $mods = $score_data['mods'];\n $rank = $score_data['rank'];\n $time = $score_data['time'];\n $accuracy = $score_data['accuracy'];\n $pp = $score_data['pp'];\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($score_data as $key => $value) {\n print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>score_id not found.</b>');\n }\n }\n\n function execute_beatmap_search_md5($beatmap_md5) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM m_beatmaps WHERE beatmap_md5 = :beatmap_md5';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':beatmap_md5' => 
$beatmap_md5]);\n $beatmap_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($beatmap_data) {\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($beatmap_data as $key => $value) {\n print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>beatmap_md5 not found.</b>');\n }\n }\n\n function execute_beatmap_search_id($beatmap_id) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM m_beatmaps WHERE beatmap_id = :beatmap_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':beatmap_id' => $beatmap_id]);\n $beatmap_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($beatmap_data) {\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($beatmap_data as $key => $value) {\n print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>beatmap_id not found.</b>');\n }\n }\n\n function execute_user_search_id($user_id) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM m_users_stats WHERE user_id = :user_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $user_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($user_data) {\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($user_data as $key => $value) {\n print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>user_id not found.</b>');\n }\n }\n\n function execute_user_search_name($username) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM m_users_stats WHERE username = :username';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':username' => $username]);\n $user_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($user_data) {\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($user_data as $key => $value) {\n 
print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>username not found.</b>');\n }\n }\n?>\n" }, { "alpha_fraction": 0.637499988079071, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 9, "blob_id": "ec37605161f3255f0094dffb0e20e86bb78fe59f", "content_id": "70a18c782800064d8807e191bab174c81f0bc9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 80, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/sql/m_beatmaps_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n beatmap_id,\n song_name\nFROM\n m_beatmaps\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.532142162322998, "alphanum_fraction": 0.5462689399719238, "avg_line_length": 44.44966506958008, "blob_id": "243e6cc53dacac39d223b6b81cdc4eb25e8d4476", "content_id": "5de01613ecdebf75a9f50bc45ec8a16043fd8709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 20316, "license_type": "no_license", "max_line_length": 473, "num_lines": 447, "path": "/www/functions.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n function get_mode_name_short($mode) {\n if ($mode == 0) { return 'std'; }\n else if ($mode == 1) { return 'taiko'; }\n else if ($mode == 2) { return 'ctb'; }\n else if ($mode == 3) { return 'mania'; }\n }\n function get_mode_name_full($mode) {\n if ($mode == 0) { return 'osu!'; }\n else if ($mode == 1) { return 'Taiko'; }\n else if ($mode == 2) { return 'Catch the Beat'; }\n else if ($mode == 3) { return 'osu!mania'; }\n }\n function get_pdo(){\n $database_config = parse_ini_file('../conf/database.conf');\n $db_dbname = $database_config['dbname'];\n $db_host = $database_config['host'];\n $db_port = $database_config['port'];\n $db_charset = $database_config['charset'];\n $db_user = 
$database_config['user'];\n $db_password = $database_config['password'];\n # Connect to database.\n $dsn = 'mysql:dbname='.$db_dbname.'; host='.$db_host.'; port='.$db_port.'; charset='.$db_charset;\n $pdo = new PDO($dsn, $db_user, $db_password);\n return $pdo;\n }\n function get_users_stats($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM m_users_stats WHERE user_id = :user_id AND updated_on = (SELECT MAX(updated_on) FROM m_users_stats WHERE user_id = :user_id)';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $users_stats = array();\n $mode_name = get_mode_name_short($mode_num);\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $users_stats += array('user_id' => $row['user_id']);\n $users_stats += array('username' => $row['username']);\n $users_stats += array('username_aka' => $row['username_aka']);\n $users_stats += array('registered_on' => $row['registered_on']);\n $users_stats += array('privileges' => $row['privileges']);\n $users_stats += array('latest_activity' => $row['latest_activity']);\n $users_stats += array('country' => $row['country']);\n $users_stats += array('ranked_score' => $row['ranked_score_'.$mode_name]);\n $users_stats += array('total_score' => $row['total_score_'.$mode_name]);\n $users_stats += array('playcount' => $row['playcount_'.$mode_name]);\n $users_stats += array('replays_watched' => $row['replays_watched_'.$mode_name]);\n $users_stats += array('total_hits' => $row['total_hits_'.$mode_name]);\n $users_stats += array('level' => $row['level_'.$mode_name]);\n $users_stats += array('accuracy' => $row['accuracy_'.$mode_name]);\n $users_stats += array('pp' => $row['pp_'.$mode_name]);\n $users_stats += array('global_leaderboard_rank' => $row['global_leaderboard_rank_'.$mode_name]);\n $users_stats += array('country_leaderboard_rank' => $row['country_leaderboard_rank_'.$mode_name]);\n $users_stats += array('play_style' => $row['play_style']);\n $users_stats += 
array('favourite_mode' => $row['favourite_mode']);\n }\n return $users_stats;\n }\n function get_users_max_combo($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT MAX(max_combo) AS max_combo FROM m_users_scores WHERE user_id = :user_id AND play_mode = :mode_num';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n $users_best_scores = array();\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $max_combo = $row['max_combo'];\n }\n return $max_combo;\n }\n function print_best_performance_scores($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT m_users_scores.score_id AS score_id, m_users_scores.mods AS mods, m_users_scores.rank AS rank, m_users_scores.time AS time, m_users_scores.accuracy AS accuracy, m_users_scores.pp AS pp, m_beatmaps.song_name, m_beatmaps.beatmap_id as beatmap_id FROM m_users_scores INNER JOIN m_beatmaps ON m_users_scores.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE m_users_scores.user_id = :user_id AND m_users_scores.play_mode = :mode_num ORDER BY pp DESC LIMIT 100;';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n $weight_percent = [100, 95, 90, 86, 81, 77, 74, 70, 66, 63,\n 60, 57, 54, 51, 49, 46, 44, 42, 38, 36,\n 34, 32, 31, 29, 28, 26, 25, 24, 23, 21,\n 20, 19, 28, 17, 17, 16, 15, 14, 14, 13,\n 12, 12, 11, 10, 10, 9, 9, 9, 8, 8,\n 7, 7, 7, 6, 6, 6, 5, 5, 5, 5,\n 4, 4, 4, 4, 4, 3, 3, 3, 3, 3,\n 3, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];\n $counter = 0;\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $score_id = $row['score_id'];\n $mods = $row['mods'];\n $rank = $row['rank'];\n $time = $row['time'];\n $accuracy = $row['accuracy'];\n $song_name = $row['song_name'];\n $beatmap_id = $row['beatmap_id'];\n $pp = $row['pp'];\n print('<div class=\"ui horizontal two column grid attached segment bp\" style=\"margin: 0\">');\n 
print('<div class=\"eleven wide column left aligned\">');\n print('<p style=\"margin-bottom: 0.5rem\">');\n print('<img src=\"/images/'.$rank.'_small.png\" style=\"padding-right: 8px\">');\n if ($mods == 0) {\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> ('.sprintf('%0.2f', $accuracy).'%)');\n } else {\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> <b>+'.get_mods($mods).'</b> ('.sprintf('%0.2f', $accuracy).'%)');\n }\n print('</p>');\n print('<p>'.get_datetime_diff($time).'</p>');\n print('</div>');\n print('<div class=\"five wide column right aligned\">');\n print('<p style=\"margin-bottom: 0.5rem\">'.sprintf('%d', $pp).'pp</p>');\n print('<p>weighted '.$weight_percent[$counter].'% ('.sprintf('%d', $pp * ($weight_percent[$counter] / 100)).'pp) <a href=\"https://ripple.moe/web/replays/'.$score_id.'\"><i class=\"star link icon\"></i></a></p>');\n print('</div>');\n print('</div>');\n $counter = $counter + 1;\n }\n }\n function get_users_ranks_count($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT rank, COUNT(rank) AS count FROM m_users_scores WHERE user_id = :user_id AND play_mode = :mode_num GROUP BY rank';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n $count_ss = 0;\n $count_s = 0;\n $count_a = 0;\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n if ($row['rank'] == 'SSH' or $row['rank'] == 'SS') {\n $count_ss = $count_ss + $row['count'];\n } else if ($row['rank'] == 'SH' or $row['rank'] == 'S') {\n $count_s = $count_s + $row['count'];\n } else if ($row['rank'] == 'A') {\n $count_a = $count_a + $row['count'];\n }\n }\n $rank_count = array('ss' => $count_ss, 's' => $count_s, 'a' => $count_a);\n return $rank_count;\n }\n function get_users_activity($user_id) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM t_users_activity WHERE user_id = :user_id ORDER BY archived_on DESC LIMIT 30';\n $statement = $pdo -> 
prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $counter = 0;\n $users_activity = array();\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $users_activity_temp = array();\n $users_activity_temp += array('archived_on' => $row['archived_on']);\n $users_activity_temp += array('song_name' => $row['song_name']);\n $users_activity_temp += array('beatmap_id' => $row['beatmap_id']);\n $users_activity_temp += array('type' => $row['type']);\n $users_activity_temp += array('ranking' => $row['ranking']);\n $users_activity_temp += array('rank' => $row['rank']);\n $users_activity_temp += array('mode' => $row['mode']);\n $users_activity += array($counter => $users_activity_temp);\n $counter += 1;\n }\n return $users_activity;\n }\n function get_users_pp_rank_history($user_id, $mode_num) {\n $pdo = get_pdo();\n $mode_name = get_mode_name_short($mode_num);\n $global_leaderboard_rank = 'global_leaderboard_rank_'.$mode_name;\n $query = 'SELECT date_format(created_on, \\'%Y-%m-%d\\') AS date, MAX('.$global_leaderboard_rank.') AS pp_rank FROM t_users_stats WHERE user_id = :user_id GROUP BY date_format(created_on, \\'%Y-%m-%d\\') ORDER BY pp_rank DESC;';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $date = [];\n $pp_rank = [];\n $counter = 0;\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $date[] = $row['date'];\n $pp_rank[] = $row['pp_rank'];\n $counter += 1;\n if ($counter > 90) { break; }\n }\n $pp_rank_history = [];\n $pp_rank_history[] = $date;\n $pp_rank_history[] = $pp_rank;\n return $pp_rank_history;\n }\n function get_users_playcount_history($user_id, $mode_num) {\n $pdo = get_pdo();\n $mode_name = get_mode_name_short($mode_num);\n $playcount = 'playcount_'.$mode_name;\n $query = 'SELECT month, '.$playcount.' 
AS playcount FROM t_users_stats_monthly WHERE user_id = :user_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $month = [];\n $playcount = [];\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $month[] = $row['month'];\n $playcount[] = $row['playcount'];\n }\n $playcount_history = [];\n $playcount_history[] = $month;\n $playcount_history[] = $playcount;\n return $playcount_history;\n }\n function get_users_replays_history($user_id, $mode_num) {\n $pdo = get_pdo();\n $mode_name = get_mode_name_short($mode_num);\n $replays = 'replays_watched_'.$mode_name;\n $query = 'SELECT month, '.$replays.' AS replays FROM t_users_stats_monthly WHERE user_id = :user_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $month = [];\n $replays = [];\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $month[] = $row['month'];\n $replays[] = $row['replays'];\n }\n $replays_history = [];\n $replays_history[] = $month;\n $replays_history[] = $replays;\n return $replays_history;\n }\n function get_datetime_diff($datetime) {\n $now = new Datetime();\n $datetime = new Datetime($datetime);\n $month = date_diff($datetime, $now) -> format('%m');\n $day = date_diff($datetime, $now) -> format('%d');\n $hour = date_diff($datetime, $now) -> format('%h');\n $minute = date_diff($datetime, $now) -> format('%i');\n if ($month > 1) {\n return $month.' months ago';\n } else if ($day > 1) {\n return $day.' days ago';\n } else if ($day > 0) {\n return 'about '.(24 + $hour).' hours ago';\n } else if ($hour > 0) {\n return 'about '.$hour.' hours ago';\n } else if ($minute > 0) {\n return $minute.' 
minutes ago';\n } else {\n return 'less than minutes ago';\n }\n }\n function print_users_activity($user_id, $username) {\n $users_activity = get_users_activity($user_id);\n $counter = 0;\n foreach ($users_activity as $activity) {\n $type = $activity['type'];\n $ranking = $activity['ranking'];\n $archived_on = $activity['archived_on'];\n $rank = $activity['rank'];\n $beatmap_id = $activity['beatmap_id'];\n $song_name = $activity['song_name'];\n $mode_name = get_mode_name_full($activity['mode']);\n if ($type != 2 and $ranking < 51) {\n print('<div class=\"ui three wide column left aligned\" style=\"padding-top: 0.5rem; padding-bottom: 0.5rem\">');\n print('<p>'.get_datetime_diff($archived_on).'</p>');\n print('</div>');\n print('<div class=\"ui thirteen wide column left aligned\" style=\"padding-top : 0.5rem; padding-bottom: 0.5rem\">');\n print('<p>');\n print('<img src=\"/images/'.$rank.'_small.png\" style=\"padding-right: 8px\"/>');\n print($username.' archived rank ');\n if ($ranking < 4) {\n print('<b>#'.$ranking.'</b> on <a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> ('.$mode_name.')');\n } else {\n print('#'.$ranking.' on <a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> ('.$mode_name.')');\n }\n print('</p>');\n print('</div>');\n $counter += 1;\n } else if ($type == 2) {\n print('<div class=\"three wide column attached segment\" style=\"padding-top: 0.5rem; padding-bottom: 0.5rem\">');\n print('<p>'.get_datetime_diff($archived_on).'</p>');\n print('</div>');\n print('<div class=\"thirteen wide column attached segment\" style=\"padding-top: 0.5rem; padding-bottom: 0.5rem\">');\n print('<p>');\n print($username.' 
has lost first place on on <a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> ('.$mode_name.')');\n print('</p>');\n print('</div>');\n $counter += 1;\n }\n if ($counter > 15) { break; }\n }\n }\n function print_first_place_ranks($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT m_first_place.score_id AS score_id, m_first_place.mods AS mods, m_first_place.rank AS rank, m_first_place.time AS time, m_first_place.accuracy AS accuracy, m_first_place.pp AS pp, m_beatmaps.song_name, m_beatmaps.beatmap_id as beatmap_id FROM m_first_place INNER JOIN m_beatmaps ON m_first_place.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE m_first_place.user_id = :user_id AND m_first_place.play_mode = :mode_num ORDER BY time DESC;';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $rank = $row['rank'];\n $song_name = $row['song_name'];\n $mods = $row['mods'];\n $accuracy = $row['accuracy'];\n $time = $row['time'];\n $pp = $row['pp'];\n $score_id = $row['score_id'];\n $beatmap_id = $row['beatmap_id'];\n print('<div class=\"ui horizontal two column grid attached segment fp\" style=\"margin: 0\">');\n print('<div class=\"thirteen wide column left aligned\">');\n print('<p style=\"margin-bottom: 0.5rem\">');\n print('<img src=\"/images/'.$rank.'_small.png\" style=\"padding-right: 8px\">');\n if ($mods == 0) {\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> ('.sprintf('%0.2f', $accuracy).'%)');\n } else {\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\">'.$song_name.'</a> <b>+'.get_mods($mods).'</b> ('.sprintf('%0.2f', $accuracy).'%)');\n }\n print('</p>');\n print('<p>'.get_datetime_diff($time).'</p>');\n print('</div>');\n print('<div class=\"three wide column right aligned\">');\n print('<p style=\"margin-bottom: 0.5rem\">'.sprintf('%d', $pp).'pp</p>');\n print('<p><a 
href=\"https://ripple.moe/web/replays/'.$score_id.'\"><i class=\"star link icon\"></i></a></p>');\n print('</div>');\n print('</div>');\n }\n }\n function print_users_recent_plays($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT t_users_scores.mods AS mods, t_users_scores.rank AS rank, t_users_scores.time AS time, t_users_scores.score AS score, m_beatmaps.song_name, m_beatmaps.beatmap_id as beatmap_id FROM t_users_scores INNER JOIN m_beatmaps ON t_users_scores.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE t_users_scores.user_id = :user_id AND t_users_scores.play_mode = :mode_num ORDER BY time DESC LIMIT 5;';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $song_name = $row['song_name'];\n $beatmap_id = $row['beatmap_id'];\n $score = $row['score'];\n $mods = $row['mods'];\n $time = $row['time'];\n $rank = $row['rank'];\n print('<div class=\"ui left aligned\">');\n print('<p>');\n print(get_datetime_diff($time).' - ');\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\"> '.$song_name.'</a> ');\n print(number_format($score).' 
('.$rank.') '.get_mods($mods));\n print('</p>');\n print('</div>');\n }\n }\n function print_users_most_passed_beatmaps($user_id, $mode_num) {\n $pdo = get_pdo();\n $query = 'SELECT COUNT(*) AS count, m_beatmaps.song_name, m_beatmaps.beatmap_id FROM t_users_scores INNER JOIN m_beatmaps ON t_users_scores.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE t_users_scores.user_id = :user_id and t_users_scores.play_mode = :mode_num GROUP BY m_beatmaps.beatmap_md5 ORDER BY count DESC LIMIT 15';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id, 'mode_num' => $mode_num]);\n $font_size = 180;\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n $count = $row['count'];\n $song_name = $row['song_name'];\n $beatmap_id = $row['beatmap_id'];\n print('<div class=\"ui left aligned\">');\n print('<p style=\"font-size: '.$font_size.'%\";>');\n print($count.' Plays - ');\n print('<a href=\"https://ripple.moe/b/'.$beatmap_id.'\"> '.$song_name.'</a> ');\n print('</p>');\n print('</div>');\n $font_size = $font_size - 6;\n }\n }\n function print_pp_chart_label($date) {\n for ($i = count($date); 0 < $i; $i--) {\n print('\"'.$i.'\", ');\n }\n }\n function print_pp_chart_data($pp_rank) {\n for ($i = 0; $i < count($pp_rank); $i++) {\n print($pp_rank[$i].',');\n }\n }\n function print_playcount_chart_label($date) {\n for ($i = 0; $i < count($date); $i++) {\n print('\"'.$date[$i].'\",');\n }\n }\n function print_playcount_chart_data($playcount) {\n for ($i = 0; $i < count($playcount); $i++) {\n print($playcount[$i].',');\n }\n }\n function print_replays_chart_label($date) {\n for ($i = 0; $i < count($date); $i++) {\n print('\"'.$date[$i].'\",');\n }\n }\n function print_replays_chart_data($replays) {\n for ($i = 0; $i < count($replays); $i++) {\n print($replays[$i].',');\n }\n }\n function get_mods($mods_num) {\n $mods = array();\n $bin = decbin($mods_num);\n $bits = str_split($bin);\n $bits = array_reverse($bits);\n $bits = array_filter($bits);\n foreach ( 
$bits as $pos => $bit ) {\n $bits[$pos] = pow(2, $pos);\n }\n $bits = array_values($bits);\n foreach ($bits as $bit) {\n if ($bit == 16384) {\n if (($key = array_search('SD', $mods)) !== false) {\n unset($mods[$key]);\n array_push($mods, 'PF');\n }\n } else if ($bit == 512) {\n if (($key = array_search('DT', $mods)) !== false) {\n unset($mods[$key]);\n array_push($mods, 'NC');\n }\n } else {\n array_push($mods, get_mod_str($bit));\n }\n }\n $mods_str = '';\n foreach ($mods as $mod) {\n $mods_str = $mods_str.$mod;\n if (next($mods) == True) {\n $mods_str = $mods_str.',';\n }\n }\n return $mods_str;\n }\n function get_mod_str($mod_num) {\n if ($mod_num == 1) {return 'NF'; }\n else if ($mod_num == 2) {return 'EZ';}\n else if ($mod_num == 8) {return 'HD';}\n else if ($mod_num == 16) {return 'HR';}\n else if ($mod_num == 32) {return 'SD';}\n else if ($mod_num == 64) {return 'DT';}\n else if ($mod_num == 256) {return 'HT';}\n else if ($mod_num == 576) {return 'NC';}\n else if ($mod_num == 1024) {return 'FL';}\n else if ($mod_num == 4096) {return 'SO';}\n }\n function get_playstyle_array($playstyle_num) {\n $playstyle = array();\n $bin = decbin($playstyle_num);\n $bits = str_split($bin);\n $bits = array_reverse($bits);\n $bits = array_filter($bits);\n foreach ( $bits as $pos => $bit ) {\n $bits[$pos] = pow(2, $pos);\n }\n $playstyle_num_array = array_values($bits);\n return $playstyle_num_array;\n }\n function print_donor_badge($user_id) {\n $pdo = get_pdo();\n $query = 'SELECT COUNT(*) AS count FROM m_users_badges WHERE user_id = :user_id AND badge_id = 14';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':user_id' => $user_id]);\n $row = $statement -> fetch(PDO::FETCH_ASSOC);\n $is_donor = $row['count'];\n if ($is_donor == 1) {\n print('<div class=\"ui label\">');\n print('<i class=\"money icon\"></i> Ripple Donor');\n print('</div>');\n }\n }\n?>\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5526094436645508, "avg_line_length": 
30.68000030517578, "blob_id": "5cc08dfe13176e683f37f334171764eb3c13f012", "content_id": "1d1f7d1a1d21d600d5169a49e1a5956423abb730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 96, "num_lines": 75, "path": "/www/manage/login.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n require_once './functions.php';\n require_unlogined_session();\n\n $hashes = [\n 'orsmng' => '$2y$10$xMWxxPRjm5joUpIMr7Pr6.P2jtMWC4I5Y.mXEQ89/HOstOHbdwvE.',\n ];\n\n $username = filter_input(INPUT_POST, 'username');\n $password = filter_input(INPUT_POST, 'password');\n if ($_SERVER['REQUEST_METHOD'] === 'POST') {\n if (\n validate_token(filter_input(INPUT_POST, 'token')) &&\n password_verify(\n $password,\n isset($hashes[$username])\n ? $hashes[$username]\n : 'ripple'\n )\n ) {\n session_regenerate_id(true);\n $_SESSION['username'] = $username;\n header('Location: /manage/portal.php');\n }\n http_response_code(403);\n }\n header('Content-Type: text/html; charset=UTF-8');\n?>\n<!DOCTYPE html>\n<html>\n<head>\n<title>Login</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/semantic/dist/semantic.min.css\">\n<script\n src=\"https://code.jquery.com/jquery-3.1.1.min.js\"\n integrity=\"sha256-hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8=\"\n crossorigin=\"anonymous\"></script>\n<script src=\"/semantic/dist/semantic.min.js\"></script>\n</head>\n<body>\n <div class=\"ui container\" style=\"padding-top: 10em;\">\n <h1 class=\"ui header center aligned\" style=\"padding-bottom: 2em;\">ORS Management Portal</h1>\n <div class=\"ui grid\">\n <div class=\"five wide column\"></div>\n <div class=\"six wide column\">\n <?php if (http_response_code() === 403): ?>\n <div class=\"ui error message\">\n <div class=\"header\">\n Login failed.\n </div>\n <p>Invalid username or password.</p>\n </div>\n <?php endif; ?>\n <div class=\"ui segment center aligned\">\n 
<form class=\"ui form\" method=\"post\" action=\"\">\n <div class=\"field\">\n <input type=\"text\" name=\"username\" value=\"\" placeholder=\"Username\">\n </div>\n <div class=\"field\">\n <input type=\"password\" name=\"password\" value=\"\" placeholder=\"Password\">\n </div>\n <div class=\"field\">\n <input type=\"hidden\" name=\"token\" value=\"<?=h(generate_token()) ?>\">\n </div>\n <div class=\"field\">\n <button class=\"ui button center aligned\" type=\"submit\">Login</button>\n </div>\n </form>\n </div>\n </div>\n <div class=\"five wide column\"></div>\n </div>\n </div>\n</body>\n</html>\n" }, { "alpha_fraction": 0.6497005820274353, "alphanum_fraction": 0.6497005820274353, "avg_line_length": 12.916666984558105, "blob_id": "e0c73f401c1fb581701473abbb50cc0d70450a11", "content_id": "7f3494cf961eb529a55ae1ebabf54d5156ba3a1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 334, "license_type": "no_license", "max_line_length": 43, "num_lines": 24, "path": "/sql/t_users_activity_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_scores scores\nWHERE NOT EXISTS(\n SELECT\n *\n FROM\n t_users_activity activity\n WHERE\n scores.score_id = activity.score_id AND\n activity.user_id = %s\n) AND NOT EXISTS (\n SELECT\n *\n FROM\n l_scores_on_activity list\n WHERE\n list.score_id = scores.score_id\n)\nORDER BY\n scores.time\nASC\n;\n" }, { "alpha_fraction": 0.6203294396400452, "alphanum_fraction": 0.6319807171821594, "avg_line_length": 34.55714416503906, "blob_id": "dc9f352946c4c4ee4340489cdc18ac1cdd94337d", "content_id": "1704119c2122b01c9900344e0b5ed42e6e92c9e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 107, "num_lines": 70, "path": "/ors/script/logger.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "# coding: 
utf-8\n# Referenced from 'pythonใฎlogๅ‡บๅŠ› <https://qiita.com/yopya/items/63155923602bf97dec53>' by yopya.\n# Thank you for yopya <3\n\nimport logging\nimport logging.handlers\nimport configparser\n\nclass logger:\n def __init__(self, name=__name__):\n name = name.replace('.py', '')\n # Defination of logger properties.\n self.logger = logging.getLogger(name)\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"[%(asctime)s][%(process)d][%(levelname)s][%(name)s]%(message)s\")\n\n # include level debug\n log_level = 10\n # stdout\n handler = logging.StreamHandler()\n handler.setLevel(int(log_level))\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n\n # fileout\n log_path = 'log/' + name + '.log'\n handler = logging.handlers.RotatingFileHandler(filename=log_path, maxBytes=11048576, backupCount=3)\n\n handler.setLevel(int(log_level))\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n\n def __get_message_list():\n \"\"\"\n Read message list file object and returns it.\n \"\"\"\n message_list_path = 'conf/message.list'\n message_object = configparser.RawConfigParser()\n message_object.read(message_list_path, 'utf-8')\n return message_object\n\n def debug(self, message_code, *values):\n message_list = logger.__get_message_list()\n message = message_list['debug'][message_code]\n message = message % values\n self.logger.debug(message)\n\n def info(self, message_code, *values):\n message_list = logger.__get_message_list()\n message = message_list['info'][message_code]\n message = message % values\n self.logger.info(message)\n\n def warn(self, message_code, *values):\n message_list = logger.__get_message_list()\n message = message_list['warn'][message_code]\n message = message % values\n self.logger.warning(message)\n\n def error(self, message_code, *values):\n message_list = logger.__get_message_list()\n message = message_list['error'][message_code]\n message = message % values\n self.logger.error(message)\n\n def 
critical(self, message_code, *values):\n message_list = logger.__get_message_list()\n message = message_list['critical'][message_code]\n message = message % values\n self.logger.critical(message, stack_info=True, exc_info=True)\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.6034482717514038, "avg_line_length": 7.285714149475098, "blob_id": "17d9d69b6e05cb93147f18e12c16206a4bd74b57", "content_id": "47476d875563b4d669efb2f103ede5cd43833ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 58, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/sql/m_beatmaps_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n m_beatmaps\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 8.285714149475098, "blob_id": "170a28921c2308826a1295e59e4c5b7b298d6e0d", "content_id": "5ef4faa7a014da6efe4963654d4812a5967fbce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 65, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/sql/m_users_001.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n COUNT(*) AS count\nFROM\n m_users\nWHERE\n user_id = %s\n;\n" }, { "alpha_fraction": 0.594622790813446, "alphanum_fraction": 0.6105455756187439, "avg_line_length": 44.60714340209961, "blob_id": "2c640db520645e50176daebf096180f7c78f6ff3", "content_id": "f74d076b05d7cb5f9a1eecda12186d4f3aad71d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3831, "license_type": "no_license", "max_line_length": 114, "num_lines": 84, "path": "/ors/main/users_stats_master.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import 
Database\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_stats_master import UsersStatsMaster\n UsersStatsMaster().execute()\n\nclass UsersStatsMaster(object):\n global log\n global database\n global connection\n log = logger.logger('users_stats_master')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersStatsMaster')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n latest_stats = self.__get_users_latest_stats(user_id)\n self.__set_users_stats_master(latest_stats, user_id)\n self.__set_users_badges_master(user_id)\n #self.__set_users_silence_info_master(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersStatsMaster')\n except Exception as e:\n log.critical('ORSC0001', 'UsersStatsMaster', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __get_users_latest_stats(self, user_id):\n result = database.execute_statement(connection, 't_users_stats_S01', user_id)\n users_latest_stats = result[1]\n return users_latest_stats\n\n def __set_users_stats_master(self, latest_stats, user_id):\n result = database.execute_statement(connection, 'm_users_stats_S01', user_id)\n count = result[1][0]['count']\n latest_stats = latest_stats[0]\n if count == 0:\n log.info('ORSI0006', latest_stats['user_id'], latest_stats['username'])\n del latest_stats['updated_on']\n result = database.execute_statement_values(connection, 'm_users_stats_I01', latest_stats.values())\n log.debug('ORSD0002', 'm_users_stats', result[0], user_id)\n else:\n log.info('ORSI0007', latest_stats['user_id'], latest_stats['username'])\n del latest_stats['created_on']\n del latest_stats['updated_on']\n latest_stats.update(user_id_key=latest_stats['user_id'])\n result = 
database.execute_statement_values(connection, 'm_users_stats_U01', latest_stats.values())\n log.debug('ORSD0007', 'm_users_stats', result[0], user_id)\n\n def __set_users_badges_master(self, user_id):\n # Get latest users badges.\n result = database.execute_statement(connection, 't_users_badges_S01', user_id)\n users_badges = result[1]\n # Check users badges records are exists on master table.\n result = database.execute_statement(connection, 'm_users_badges_S01', user_id)\n count = result[1][0]['count']\n if count == 0:\n # If count is 0, there are no records on master table.\n # So create new record with insert statement.\n for users_badge in users_badges:\n del users_badge['updated_on']\n result = database.execute_statement_values(connection, 'm_users_badges_I01', users_badge.values())\n else:\n # If count is not 0, there are records on master table.\n # So update exists records with update statement.\n for users_badge in users_badges:\n del users_badge['created_on']\n del users_badge['updated_on']\n users_badge.update(user_id_key=users_badge['user_id'])\n users_badge.update(badge_id_key=users_badge['badge_id'])\n result = database.execute_statement_values(connection, 'm_users_badges_U01', users_badge.values())\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 12.199999809265137, "blob_id": "e68153632805d5d57340c2d69cff95ae31937c70", "content_id": "e95b67ab403812010f35afaa4c4d307c7624a2d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 66, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/sql/m_first_place_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n m_first_place\nWHERE\n m_first_place.score_id = %s\n;\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 8, "blob_id": "047b5390ae2d04996ce3d29046c332bbe8e3076c", 
"content_id": "8b2b68505fc8c612f3f84fa2b1eb9d606ad70142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 27, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/sql/w_beatmaps_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n w_beatmaps\n;\n" }, { "alpha_fraction": 0.6864407062530518, "alphanum_fraction": 0.6864407062530518, "avg_line_length": 13.75, "blob_id": "55bb120895de44bf57dfefc2004a4f79285a1598", "content_id": "fe44d04ea1be4ed73c67530629c81e9b44eaf3ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 236, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/sql/t_users_silence_info_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n t_users_silence_info\nSELECT\n *\nFROM\n w_users_silence_info work\nWHERE\n work.user_id = %s\nAND NOT EXISTS(\n SELECT\n *\n FROM\n t_users_silence_info transaction\n WHERE\n transaction.created_on = work.created_on\n);\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 37, "blob_id": "bea072ac0085c89629a9ef7a4a64eacffdf856f8", "content_id": "15a32a6bd839dc8d880f53f488175cdee33e243b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 38, "license_type": "no_license", "max_line_length": 37, "num_lines": 1, "path": "/sql/s_api_request_count_tick_D01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM s_api_request_count_tick;\n" }, { "alpha_fraction": 0.6225875616073608, "alphanum_fraction": 0.6390278935432434, "avg_line_length": 33.974998474121094, "blob_id": "35f5ba809e6e207013a5b4854fb0584b10d8242c", "content_id": "1546b87ef65e4bffe2dad77b334b4960928d0f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1399, "license_type": "no_license", "max_line_length": 86, "num_lines": 40, "path": "/ors/main/users_scores_transaction.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_scores_transaction import UsersScoresTransaction\n UsersScoresTransaction().execute()\n\nclass UsersScoresTransaction(object):\n global log\n global database\n global connection\n log = logger.logger('Users_stats_transaction')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersScoresTransaction')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n self.__set_users_scores_transaction(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersScoresTransaction')\n except Exception as e:\n log.critical('ORSC0001', 'UsersScoresTransaction', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __set_users_scores_transaction(self, user_id):\n result = database.execute_statement(connection, 't_users_scores_I01', user_id)\n log.debug('ORSD0002', 't_users_scores', result[0], user_id)\n" }, { "alpha_fraction": 0.6527777910232544, "alphanum_fraction": 0.6527777910232544, "avg_line_length": 9.285714149475098, "blob_id": "8257a2b96eab5febf5bbbb57e100ef32a3cac9e4", "content_id": "6359aeb313c5bf7bdffc34337437979b1a4616f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 72, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/sql/t_users_scores_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", 
"text": "SELECT\n *\nFROM\n t_users_scores\nWHERE\n t_users_scores.score_id = %s\n;\n" }, { "alpha_fraction": 0.4750656187534332, "alphanum_fraction": 0.5013123154640198, "avg_line_length": 14.239999771118164, "blob_id": "245c6c4393ef033779d616094312af5f877ca155", "content_id": "741684377a414e90336ee88f2f659336c95efecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 381, "license_type": "no_license", "max_line_length": 21, "num_lines": 25, "path": "/sql/m_first_place_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n m_first_place\nSET\n user_id = %s,\n score_id = %s,\n beatmap_md5 = '%s',\n max_combo = %s,\n score = %s,\n is_full_combo = %s,\n mods = %s,\n count_300 = %s,\n count_100 = %s,\n count_50 = %s,\n count_geki = %s,\n count_katu = %s,\n count_miss = %s,\n time = '%s',\n play_mode = %s,\n accuracy = %s,\n pp = %s,\n rank = '%s',\n completed = %s\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.6139295101165771, "alphanum_fraction": 0.6139295101165771, "avg_line_length": 22.260000228881836, "blob_id": "a972c592a8e06f36524589eb29262c86f332c352", "content_id": "f6ea18b43867cbd22e9df26acf0a3a90a22590b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 38, "num_lines": 50, "path": "/sql/t_users_stats_monthly_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n t_users_stats_monthly\nSET\n user_id = %s,\n username = '%s',\n month = '%s',\n ranked_score_std = %s,\n total_score_std = %s,\n playcount_std = %s,\n replays_watched_std = %s,\n total_hits_std = %s,\n level_std = %s,\n accuracy_std = %s,\n pp_std = %s,\n global_leaderboard_rank_std = %s,\n country_leaderboard_rank_std = %s,\n ranked_score_taiko = %s,\n total_score_taiko = %s,\n playcount_taiko = %s,\n replays_watched_taiko = %s,\n total_hits_taiko = 
%s,\n level_taiko = %s,\n accuracy_taiko = %s,\n pp_taiko = %s,\n global_leaderboard_rank_taiko = %s,\n country_leaderboard_rank_taiko = %s,\n ranked_score_ctb = %s,\n total_score_ctb = %s,\n playcount_ctb = %s,\n replays_watched_ctb = %s,\n total_hits_ctb = %s,\n level_ctb = %s,\n accuracy_ctb = %s,\n pp_ctb = %s,\n global_leaderboard_rank_ctb = %s,\n country_leaderboard_rank_ctb = %s,\n ranked_score_mania = %s,\n total_score_mania = %s,\n playcount_mania = %s,\n replays_watched_mania = %s,\n total_hits_mania = %s,\n level_mania = %s,\n accuracy_mania = %s,\n pp_mania = %s,\n global_leaderboard_rank_mania = %s,\n country_leaderboard_rank_mania = %s\nWHERE\n user_id = %s AND\n month = '%s'\n;\n" }, { "alpha_fraction": 0.5272108912467957, "alphanum_fraction": 0.5340136289596558, "avg_line_length": 14.473684310913086, "blob_id": "95936bab80057323d7051ee2fee7540eb2ec3dde", "content_id": "3657a478671cac8a8da760644be9f81114059e20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 294, "license_type": "no_license", "max_line_length": 28, "num_lines": 19, "path": "/sql/m_beatmaps_U01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "UPDATE\n m_beatmaps\nSET\n beatmap_id = %s,\n beatmapset_id = %s,\n beatmap_md5 = '%s',\n song_name = '%s',\n ar = %s,\n od = %s,\n difficulty = %s,\n max_combo = %s,\n hit_length = %s,\n ranked = %s,\n ranked_status_frozen = %s,\n latest_update = '%s',\n mode = %s\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.5447958111763, "alphanum_fraction": 0.5619235634803772, "avg_line_length": 36.024391174316406, "blob_id": "11cd54b3efaaf54f802bdfff6665f83eb27e6816", "content_id": "f63ba39241bc039ab5dfcca53e6cbaaa79150ef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 109, "num_lines": 41, "path": "/www/ranks.php", "repo_name": 
"ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php\n $database_config = parse_ini_file('../conf/database.conf');\n $db_dbname = $database_config['dbname'];\n $db_host = $database_config['host'];\n $db_port = $database_config['port'];\n $db_charset = $database_config['charset'];\n $db_user = $database_config['user'];\n $db_password = $database_config['password'];\n\n # Connect to database.\n $dsn = 'mysql:dbname='.$db_dbname.'; host='.$db_host.'; port='.$db_port.'; charset='.$db_charset;\n $pdo = new PDO($dsn, $db_user, $db_password);\n\n $query = 'SELECT rank, COUNT(rank) AS count FROM m_users_scores GROUP BY rank';\n\n $statement = $pdo -> prepare($query);\n $statement -> execute();\n\n # Set selected data.\n $count_ss = 0;\n $count_s = 0;\n $count_a = 0;\n while ($row = $statement -> fetch(PDO::FETCH_ASSOC)) {\n if ($row['rank'] == 'SSH' or $row['rank'] == 'SS') {\n $count_ss = $count_ss + $row['count'];\n } else if ($row['rank'] == 'SH' or $row['rank'] == 'S') {\n $count_s = $count_s + $row['count'];\n } else if ($row['rank'] == 'A') {\n $count_a = $count_a + $row['count'];\n }\n}\n?>\n<table align=\"center\" width=\"400\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr>\n <td width=\"42\"><img height=\"42\" src=\"/images/SS.png\"></td><td width=\"50\"><?php print($count_ss) ?></td>\n <td width=\"42\"><img height=\"42\" src=\"/images/S.png\"></td><td width=\"50\"><?php print($count_s) ?></td>\n <td width=\"42\"><img height=\"42\" src=\"/images/A.png\"></td><td width=\"50\"><?php print($count_a) ?></td>\n </tr>\n </tbody>\n</table>\n" }, { "alpha_fraction": 0.6216216087341309, "alphanum_fraction": 0.6576576828956604, "avg_line_length": 23.66666603088379, "blob_id": "22214567542a4c536f6b1a2d5056091bd66f5f0c", "content_id": "c4d57ce5651b389b0fe96e6f782fd4a76a6d41b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 222, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, 
"path": "/sql/s_api_request_count_hourly_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n s_api_request_count_hourly\nSELECT\n DATE_FORMAT(datetime, '%%Y-%%m-%%d %%H:00:00') AS datetime,\n SUM(count) AS count\nFROM\n s_api_request_count_tick\nGROUP BY\n DATE_FORMAT(datetime, '%%Y-%%m-%%d %%H:00:00');\n" }, { "alpha_fraction": 0.6429240703582764, "alphanum_fraction": 0.6579194068908691, "avg_line_length": 32.34375, "blob_id": "51bf147092d3b8fe8deb00a46751694a1959d05c", "content_id": "19f44195fd934d03797556782acade0edb198b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 89, "num_lines": 32, "path": "/ors/special/stat_api_request_hourly.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.special.stat_api_request_hourly import StatApiRequestHourly\n StatApiRequestHourly().execute()\n\nclass StatApiRequestHourly(object):\n global log\n global database\n global connection\n log = logger.logger('stat_api_request_hourly')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n process_name = 'StatApiRequestHourly'\n try:\n log.info('ORSI0001', process_name)\n self.__tally_api_request_hourly()\n connection.commit()\n connection.close()\n log.info('ORSI0002', process_name)\n except Exception as e:\n log.critical('ORSC0001', process_name, e)\n raise Exception(e)\n\n def __tally_api_request_hourly(self):\n result = database.execute_statement(connection, 's_api_request_count_hourly_I01')\n result = database.execute_statement(connection, 's_api_request_count_tick_D01')\n" }, { "alpha_fraction": 0.717666506767273, "alphanum_fraction": 0.724821150302887, "avg_line_length": 42.261905670166016, "blob_id": 
"c5d3036810e64042bc11b96616bd2d7462037ceb", "content_id": "5cd3d01ce7bb649fa01477d7adb951817f7b775c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 80, "num_lines": 42, "path": "/ors/main/admiral.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "from ors.main.users_stats_work import UsersStatsWork\nfrom ors.main.users_stats_transaction import UsersStatsTransaction\nfrom ors.main.users_stats_master import UsersStatsMaster\nfrom ors.main.users_stats_monthly import UsersStatsMonthly\nfrom ors.main.users_scores_work import UsersScoresWork\nfrom ors.main.users_scores_transaction import UsersScoresTransaction\nfrom ors.main.users_scores_master import UsersScoresMaster\nfrom ors.main.beatmap_master import BeatmapMaster\nfrom ors.main.users_activity import UsersActivity\nfrom ors.main.first_place_master import FirstPlaceMaster\nfrom ors.script import logger\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n from ors.main.admiral import Admiral\n Admiral().execute()\n\nclass Admiral(object):\n global log\n log = logger.logger('admiral')\n\n def execute(self):\n self.__execute_shell(UsersStatsWork(), 'UsersStatsWork')\n self.__execute_shell(UsersStatsTransaction(), 'UsersStatsTransaction')\n self.__execute_shell(UsersStatsMaster(), 'UsersStatsMaster')\n self.__execute_shell(UsersStatsMonthly(), 'UsersStatsMonthly')\n self.__execute_shell(UsersScoresWork(), 'UsersScoresWork')\n self.__execute_shell(BeatmapMaster(), 'BeatmapMaster')\n self.__execute_shell(UsersScoresTransaction(), 'UsersScoresTransaction')\n self.__execute_shell(UsersScoresMaster(), 'UsersScoresMaster')\n self.__execute_shell(UsersActivity(), 'UsersActivity')\n self.__execute_shell(FirstPlaceMaster(), 'FirstPlaceMaster')\n\n def __execute_shell(self, constructor, process_name):\n log.info('ORSI0001', process_name)\n try:\n constructor.execute()\n except Exception 
as e:\n log.critical('ORSC0001', process_name, e)\n sys.exit(1)\n log.info('ORSI0002', process_name)\n" }, { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.6712328791618347, "avg_line_length": 9.428571701049805, "blob_id": "32bcdef6f35359e0c2f092aa0e9a58debe0d6845", "content_id": "3fcc3bdc3b5a49f85cd582e0d35fc71293568616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 73, "license_type": "no_license", "max_line_length": 26, "num_lines": 7, "path": "/sql/s_api_request_count_tick_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n count\nFROM\n s_api_request_count_tick\nWHERE\n datetime = '%s'\n;\n" }, { "alpha_fraction": 0.6142557859420776, "alphanum_fraction": 0.6336477994918823, "avg_line_length": 37.15999984741211, "blob_id": "97e14091fb677b5a2f72b8aa83c545c9d340c9ef", "content_id": "afc91fb44fb23d26fed75bca021fd45822e1e0a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1908, "license_type": "no_license", "max_line_length": 92, "num_lines": 50, "path": "/ors/main/users_stats_transaction.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script import converter\nfrom ors.script import logger\n\nif __name__ == \"__main__\":\n from ors.main.users_stats_transaction import UsersStatsTransaction\n UsersStatsTransaction().execute()\n\nclass UsersStatsTransaction(object):\n global log\n global database\n global connection\n log = logger.logger('users_stats_transaction')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersStatsTransaction')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n self.__set_users_stats_transaction(user_id)\n self.__set_users_badges(user_id)\n 
self.__set_users_silence_info(user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersStatsTransaction')\n except Exception as e:\n log.critical('ORSC0001', 'UsersStatsTransaction', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __set_users_stats_transaction(self, user_id):\n result = database.execute_statement(connection, 't_users_stats_I01', user_id)\n log.debug('ORSD0002', 't_users_stats', result[0], user_id)\n\n def __set_users_badges(self, user_id):\n result = database.execute_statement(connection, 't_users_badges_I01', user_id)\n log.debug('ORSD0002', 't_users_badges', result[0], user_id)\n\n def __set_users_silence_info(self, user_id):\n result = database.execute_statement(connection, 't_users_silence_info_I01', user_id)\n log.debug('ORSD0002', 't_users_silence_info', result[0], user_id)\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 7, "blob_id": "2a3766704623c144bd07fe96e93cdc9b202db9fe", "content_id": "8257845c7150f1756eab8c59ca3ebd1bbb2507d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 56, "license_type": "no_license", "max_line_length": 15, "num_lines": 7, "path": "/sql/m_users_S02.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n user_id\nFROM\n m_users\nWHERE\n is_enable = 1\n;\n" }, { "alpha_fraction": 0.6301369667053223, "alphanum_fraction": 0.6301369667053223, "avg_line_length": 11.166666984558105, "blob_id": "a6025538e99011cd80f00d44218e610eaa0b9eea", "content_id": "2a1831dd7dbc8526520f515cf2551fa8036b72bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 73, "license_type": "no_license", "max_line_length": 18, "num_lines": 6, "path": "/sql/w_users_scores_D01.sql", "repo_name": 
"ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "DELETE FROM\n w_users_scores\nWHERE\n user_id = %s AND\n play_mode = %s\n;\n" }, { "alpha_fraction": 0.6271186470985413, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 7.4285712242126465, "blob_id": "80eb2cbf883262e9018bb82b878bdfdde9da0941", "content_id": "a61e6bf8e37c4d22f797a84d3fb2e5939f488f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 59, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/sql/t_users_activity_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_activity\nWHERE\n score_id = %s\n;\n" }, { "alpha_fraction": 0.6557161808013916, "alphanum_fraction": 0.6589575409889221, "avg_line_length": 54.95588302612305, "blob_id": "8071305fba0fc63433e70867faabb79f52d643d8", "content_id": "eb6e133530ee0f880972895e20d7a01e635368e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11415, "license_type": "no_license", "max_line_length": 137, "num_lines": 204, "path": "/ors/script/converter.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "from ors.script import converter\nfrom datetime import datetime\nimport pytz\n\ndef convert_users_score(user_id, users_score):\n score = {}\n score['user_id'] = user_id\n score['score_id'] = users_score['id']\n score['beatmap_md5'] = users_score['beatmap_md5']\n score['max_combo'] = users_score['max_combo']\n score['score'] = users_score['score']\n score['is_full_combo'] = int(users_score['full_combo'])\n score['mods'] = users_score['mods']\n score['count_300'] = users_score['count_300']\n score['count_100'] = users_score['count_100']\n score['count_50'] = users_score['count_50']\n score['count_geki'] = users_score['count_geki']\n score['count_katu'] = users_score['count_katu']\n score['count_miss'] = users_score['count_miss']\n 
score['time'] = convert_datetime(convert_iso_datetime(users_score['time']))\n score['play_mode'] = users_score['play_mode']\n score['accuracy'] = users_score['accuracy']\n score['pp'] = users_score['pp']\n score['rank'] = users_score['rank']\n score['completed'] = users_score['completed']\n score['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return score\n\ndef convert_users_stats(users_stats):\n modes = ['std', 'taiko', 'ctb', 'mania']\n stats = {}\n stats['user_id'] = users_stats['id']\n stats['username'] = users_stats['username']\n stats['username_aka'] = users_stats['username_aka']\n stats['registered_on'] = convert_datetime(convert_iso_datetime(users_stats['registered_on']))\n stats['privileges'] = users_stats['privileges']\n stats['latest_activity'] = convert_datetime(convert_iso_datetime(users_stats['latest_activity']))\n stats['country'] = users_stats['country']\n for mode in modes:\n stats['ranked_score_' + mode] = users_stats[mode]['ranked_score']\n stats['total_score_' + mode] = users_stats[mode]['total_score']\n stats['playcount_' + mode] = users_stats[mode]['playcount']\n stats['replays_watched_' + mode] = users_stats[mode]['replays_watched']\n stats['total_hits_' + mode] = users_stats[mode]['total_hits']\n stats['level_' + mode] = users_stats[mode]['level']\n stats['accuracy_' + mode] = users_stats[mode]['accuracy']\n stats['pp_' + mode] = users_stats[mode]['pp']\n if users_stats[mode]['global_leaderboard_rank'] == None:\n stats['global_leaderboard_rank_' + mode] = 0\n else:\n stats['global_leaderboard_rank_' + mode] = users_stats[mode]['global_leaderboard_rank']\n if users_stats[mode]['country_leaderboard_rank'] == None:\n stats['country_leaderboard_rank_' + mode] = 0\n else:\n stats['country_leaderboard_rank_' + mode] = users_stats[mode]['country_leaderboard_rank']\n stats['play_style'] = users_stats['play_style']\n stats['favourite_mode'] = users_stats['favourite_mode']\n stats['created_on'] = 
convert_datetime(datetime.now(pytz.timezone('UTC')))\n return stats\n\ndef convert_users_badge(user_id, users_badge):\n badge = {}\n badge['user_id'] = user_id\n badge['badge_id'] = users_badge['id']\n badge['name'] = users_badge['name']\n badge['icon'] = users_badge['icon']\n badge['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return badge\n\ndef convert_users_silence_info(user_id, users_silence_info):\n silence_info = {}\n silence_info['user_id'] = user_id\n silence_info['reason'] = users_silence_info['reason']\n silence_info['end'] = convert_datetime(convert_iso_datetime(users_silence_info['end']))\n silence_info['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return silence_info\n\ndef convert_beatmap(beatmap, mode):\n beatmap_temp = {}\n beatmap_temp['beatmap_id'] = beatmap['beatmap_id']\n beatmap_temp['beatmapset_id'] = beatmap['beatmapset_id']\n beatmap_temp['beatmap_md5'] = beatmap['beatmap_md5']\n beatmap_temp['song_name'] = beatmap['song_name'].replace('\\'', '\\\\\\'')\n beatmap_temp['ar'] = beatmap['ar']\n beatmap_temp['od'] = beatmap['od']\n beatmap_temp['difficulty'] = beatmap['difficulty']\n beatmap_temp['max_combo'] = beatmap['max_combo']\n beatmap_temp['hit_length'] = beatmap['hit_length']\n beatmap_temp['ranked'] = beatmap['ranked']\n beatmap_temp['ranked_status_frozen'] = beatmap['ranked_status_frozen']\n beatmap_temp['latest_update'] = convert_datetime(convert_iso_datetime(beatmap['latest_update']))\n beatmap_temp['mode'] = mode\n beatmap_temp['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return beatmap_temp\n\ndef convert_activity(score, beatmap_id, song_name, ranking):\n activity = {}\n activity['user_id'] = score['user_id']\n activity['score_id'] = score['score_id']\n activity['score'] = score['score']\n activity['beatmap_id'] = beatmap_id\n activity['beatmap_md5'] = score['beatmap_md5']\n activity['song_name'] = song_name.replace('\\'', '\\\\\\'')\n activity['ranking'] 
= ranking\n if ranking == 1:\n activity['type'] = 1\n elif ranking == -1:\n activity['type'] = 2\n else:\n activity['type'] = 0\n activity['mode'] = score['play_mode']\n activity['rank'] = score['rank']\n activity['archive_on'] = score['time']\n activity['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return activity\n\ndef convert_beatmap_peppy(beatmap_info_peppy):\n beatmap_info = {}\n beatmap_info['beatmap_id'] = beatmap_info_peppy['beatmap_id']\n beatmap_info['beatmapset_id'] = beatmap_info_peppy['beatmapset_id']\n beatmap_info['beatmap_md5'] = beatmap_info_peppy['file_md5']\n song_name = beatmap_info_peppy['artist'] + ' - ' + beatmap_info_peppy['title'] + ' [' + beatmap_info_peppy['version'] + ']'\n beatmap_info['song_name'] = song_name.replace('\\'', '\\\\\\'')\n beatmap_info['ar'] = beatmap_info_peppy['diff_approach']\n beatmap_info['od'] = beatmap_info_peppy['diff_overall']\n beatmap_info['difficulty'] = beatmap_info_peppy['difficultyrating']\n beatmap_info['max_combo'] = beatmap_info_peppy['max_combo']\n beatmap_info['hit_length'] = beatmap_info_peppy['hit_length']\n beatmap_info['ranked'] = -1\n beatmap_info['ranked_status_frozen'] = -1\n beatmap_info['latest_update'] = beatmap_info_peppy['last_update']\n beatmap_info['mode'] = beatmap_info_peppy['mode']\n beatmap_info['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return beatmap_info\n\ndef convert_first_place_score(first_place_score):\n first_place_score['time'] = convert_datetime(convert_iso_datetime(first_place_score['time']))\n return first_place_score\n\ndef convert_monthly_stats(month, latest, oldest):\n monthly_stats = {}\n monthly_stats['user_id'] = latest['user_id']\n monthly_stats['username'] = latest['username']\n monthly_stats['month'] = month\n monthly_stats['ranked_score_std'] = latest['ranked_score_std'] - oldest['ranked_score_std']\n monthly_stats['total_score_std'] = latest['total_score_std'] - oldest['total_score_std']\n 
monthly_stats['playcount_std'] = latest['playcount_std'] - oldest['playcount_std']\n monthly_stats['replays_watched_std'] = latest['replays_watched_std'] - oldest['replays_watched_std']\n monthly_stats['total_hits_std'] = latest['total_hits_std'] - oldest['total_hits_std']\n monthly_stats['level_std'] = latest['level_std'] - oldest['level_std']\n monthly_stats['accuracy_std'] = latest['accuracy_std'] - oldest['accuracy_std']\n monthly_stats['pp_std'] = latest['pp_std'] - oldest['pp_std']\n monthly_stats['global_leaderboard_rank_std'] = latest['global_leaderboard_rank_std'] - oldest['global_leaderboard_rank_std']\n monthly_stats['country_leaderboard_rank_std'] = latest['country_leaderboard_rank_std'] - oldest['country_leaderboard_rank_std']\n monthly_stats['ranked_score_taiko'] = latest['ranked_score_taiko'] - oldest['ranked_score_taiko']\n monthly_stats['total_score_taiko'] = latest['total_score_taiko'] - oldest['total_score_taiko']\n monthly_stats['playcount_taiko'] = latest['playcount_taiko'] - oldest['playcount_taiko']\n monthly_stats['replays_watched_taiko'] = latest['replays_watched_taiko'] - oldest['replays_watched_taiko']\n monthly_stats['total_hits_taiko'] = latest['total_hits_taiko'] - oldest['total_hits_taiko']\n monthly_stats['level_taiko'] = latest['level_taiko'] - oldest['level_taiko']\n monthly_stats['accuracy_taiko'] = latest['accuracy_taiko'] - oldest['accuracy_taiko']\n monthly_stats['pp_taiko'] = latest['pp_taiko'] - oldest['pp_taiko']\n monthly_stats['global_leaderboard_rank_taiko'] = latest['global_leaderboard_rank_taiko'] - oldest['global_leaderboard_rank_taiko']\n monthly_stats['country_leaderboard_rank_taiko'] = latest['country_leaderboard_rank_taiko'] - oldest['country_leaderboard_rank_taiko']\n monthly_stats['ranked_score_ctb'] = latest['ranked_score_ctb'] - oldest['ranked_score_ctb']\n monthly_stats['total_score_ctb'] = latest['total_score_ctb'] - oldest['total_score_ctb']\n monthly_stats['playcount_ctb'] = latest['playcount_ctb'] - 
oldest['playcount_ctb']\n monthly_stats['replays_watched_ctb'] = latest['replays_watched_ctb'] - oldest['replays_watched_ctb']\n monthly_stats['total_hits_ctb'] = latest['total_hits_ctb'] - oldest['total_hits_ctb']\n monthly_stats['level_ctb'] = latest['level_ctb'] - oldest['level_ctb']\n monthly_stats['accuracy_ctb'] = latest['accuracy_ctb'] - oldest['accuracy_ctb']\n monthly_stats['pp_ctb'] = latest['pp_ctb'] - oldest['pp_ctb']\n monthly_stats['global_leaderboard_rank_ctb'] = latest['global_leaderboard_rank_ctb'] - oldest['global_leaderboard_rank_ctb']\n monthly_stats['country_leaderboard_rank_ctb'] = latest['country_leaderboard_rank_ctb'] - oldest['country_leaderboard_rank_ctb']\n monthly_stats['ranked_score_mania'] = latest['ranked_score_mania'] - oldest['ranked_score_mania']\n monthly_stats['total_score_mania'] = latest['total_score_mania'] - oldest['total_score_mania']\n monthly_stats['playcount_mania'] = latest['playcount_mania'] - oldest['playcount_mania']\n monthly_stats['replays_watched_mania'] = latest['replays_watched_mania'] - oldest['replays_watched_mania']\n monthly_stats['total_hits_mania'] = latest['total_hits_mania'] - oldest['total_hits_mania']\n monthly_stats['level_mania'] = latest['level_mania'] - oldest['level_mania']\n monthly_stats['accuracy_mania'] = latest['accuracy_mania'] - oldest['accuracy_mania']\n monthly_stats['pp_mania'] = latest['pp_mania'] - oldest['pp_mania']\n monthly_stats['global_leaderboard_rank_mania'] = latest['global_leaderboard_rank_mania'] - oldest['global_leaderboard_rank_mania']\n monthly_stats['country_leaderboard_rank_mania'] = latest['country_leaderboard_rank_mania'] - oldest['country_leaderboard_rank_mania']\n monthly_stats['created_on'] = convert_datetime(datetime.now(pytz.timezone('UTC')))\n return monthly_stats\n\ndef convert_iso_datetime(iso_str):\n dt = None\n if \":\" == iso_str[-3:-2]:\n iso_str = iso_str[:-3]+iso_str[-2:]\n try:\n dt = datetime.strptime(iso_str, '%Y-%m-%dT%H:%M:%S%Z')\n dt = 
pytz.utc.localize(dt).astimezone(pytz.timezone('UTC'))\n except ValueError:\n try:\n dt = datetime.strptime(iso_str, '%Y-%m-%dT%H:%M:%S%z')\n dt = dt.astimezone(pytz.timezone('UTC'))\n except ValueError:\n pass\n return dt\n\ndef convert_datetime(dt):\n return dt.strftime('%Y-%m-%d %H:%M:%S')\n" }, { "alpha_fraction": 0.5398633480072021, "alphanum_fraction": 0.5569475889205933, "avg_line_length": 31.518518447875977, "blob_id": "af18865a6ec00e57c34443f15b60a0366e809312", "content_id": "ad04f28d641f480a81ba128a3efe9f240ab9d2c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/ors/special/regist_user.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import sys\nfrom ors.script import converter\nfrom ors.script import database\nfrom ors.script import util\n\n# CHUDAN #\nif __name__ == \"__main__\":\n from ors.special.regist_user import RegistUser\n RegistUser().execute(sys.argv[1:])\n\nclass RegistUser(object):\n def execute(self, user_ids):\n user_ids = [1000]\n for user_id in user_ids:\n print(user_id)\n # Duplicate check.\n result = database.execute_statement('m_users_001', user_id)\n if result[1][0]['count'] == 0:\n # Regist user.\n now = util.datetime_now()\n now = converter.convert_datetime(now)\n result = database.execute_statement('m_users_002', user_id, 1, now)\n print(result)\n else:\n # Nothing to do.\n\n print('user_id[%s] is already regsistred.')\n" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.6065573692321777, "avg_line_length": 7.714285850524902, "blob_id": "f6e44c6fd07b2739f23dd65b40c9ce6bf2f8e34a", "content_id": "578fe5c64894085540472e8fd5a93e8f3a3d819b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 61, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": 
"/sql/m_first_place_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n m_first_place\nWHERE\n beatmap_md5 = '%s'\n;\n" }, { "alpha_fraction": 0.45375722646713257, "alphanum_fraction": 0.4566473960876465, "avg_line_length": 9.176470756530762, "blob_id": "7ccad2b529601c3dc897070300a0e66a1fb24dfe", "content_id": "0464efec40809ba2d05b80af9ef803fe33764990", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 346, "license_type": "no_license", "max_line_length": 25, "num_lines": 34, "path": "/sql/w_beatmaps_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n w_beatmaps(\n beatmap_id,\n beatmapset_id,\n beatmap_md5,\n song_name,\n ar,\n od,\n difficulty,\n max_combo,\n hit_length,\n ranked,\n ranked_status_frozen,\n latest_update,\n mode,\n created_on\n )\nVALUES (\n %s,\n %s,\n '%s',\n '%s',\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n %s,\n '%s',\n %s,\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.4577464759349823, "alphanum_fraction": 0.4577464759349823, "avg_line_length": 10.833333015441895, "blob_id": "2622cfb006e80148b8400bd9df827d24cee3078a", "content_id": "691999c6168bf14e909b80a0c46c06e0f574caf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 142, "license_type": "no_license", "max_line_length": 26, "num_lines": 12, "path": "/sql/l_scores_on_first_place_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n l_scores_on_first_place(\n user_id,\n score_id,\n type,\n created_on\n ) VALUES (\n %s,\n %s,\n %s,\n '%s'\n );\n" }, { "alpha_fraction": 0.5562130212783813, "alphanum_fraction": 0.5562130212783813, "avg_line_length": 11.071428298950195, "blob_id": "1c9f12c26242cd8280f71044ed89200508cf3b0e", "content_id": "13d9c0778270deb3994d52dc8fdd71cd8f099100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"SQL", "length_bytes": 169, "license_type": "no_license", "max_line_length": 33, "num_lines": 14, "path": "/sql/t_users_badges_S01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n *\nFROM\n t_users_badges\nWHERE\n created_on = (\n SELECT\n MAX(created_on)\n FROM\n t_users_badges\n WHERE\n t_users_badges.user_id = %s\n )\n;\n" }, { "alpha_fraction": 0.4375, "alphanum_fraction": 0.4375, "avg_line_length": 8, "blob_id": "99c4eb112895e8bd3af23bef742e827204e2cb05", "content_id": "0e52a20a64685dcf7965a39863f5e4c1569c05f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 144, "license_type": "no_license", "max_line_length": 17, "num_lines": 16, "path": "/sql/w_users_badges_I01.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "INSERT INTO\n w_users_badges(\n user_id,\n badge_id,\n name,\n icon,\n created_on\n )\nVALUES (\n %s,\n %s,\n '%s',\n '%s',\n '%s'\n)\n;\n" }, { "alpha_fraction": 0.5715509653091431, "alphanum_fraction": 0.5863324999809265, "avg_line_length": 50.86666488647461, "blob_id": "4815901626c45fcf3df765f9e77b71c1faf4f362", "content_id": "d868a4310c124c9cc45c158af6325cf8be437e33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4668, "license_type": "no_license", "max_line_length": 153, "num_lines": 90, "path": "/ors/main/users_scores_master.py", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "import traceback\nimport sys\nimport os\nfrom ors.script.database import Database\nfrom ors.script.ripple_api import RippleApi\nfrom ors.script import converter\nfrom ors.script import logger\nfrom ors.script import util\n\nif __name__ == \"__main__\":\n from ors.main.users_scores_master import UsersScoresMaster\n UsersScoresMaster().execute()\n\nclass UsersScoresMaster(object):\n global log\n global database\n global connection\n log = 
logger.logger('users_scores_master')\n database = Database()\n connection = database.get_connection()\n\n def execute(self):\n try:\n log.info('ORSI0001', 'UsersScoresMaster')\n user_ids = self.__get_target_user_ids()\n for __user_id in user_ids:\n user_id = __user_id['user_id']\n new_scores = self.__get_users_scores_transaction(user_id)\n self.__set_users_scores_master(new_scores, user_id)\n connection.commit()\n connection.close()\n log.info('ORSI0002', 'UsersScoresMaster')\n except Exception as e:\n log.critical('ORSC0001', 'UsersScoreMaster', e)\n raise Exception(e)\n\n def __get_target_user_ids(self):\n result = database.execute_statement(connection, 'm_users_003')\n user_ids = result[1]\n return user_ids\n\n def __get_users_scores_transaction(self, user_id):\n result = database.execute_statement(connection, 't_users_scores_S01', user_id)\n log.debug('ORSD0010', 't_users_scores', result[0], user_id)\n return result[1]\n\n def __set_users_scores_master(self, new_scores, user_id):\n new_counter = 0\n updated_counter = 0\n not_updated_counter = 0\n for new_score in new_scores:\n result = database.execute_statement(connection, 'm_users_scores_S01', user_id, new_score['beatmap_md5'])\n master_score = result[1]\n if bool(master_score) == False:\n # Case of new score.\n del new_score['updated_on']\n result = database.execute_statement_values(connection, 'm_users_scores_I01', new_score.values())\n beatmap_info = util.get_beatmap_info(new_score['beatmap_md5'], new_score['play_mode'])\n song_name = beatmap_info['song_name']\n log.debug('ORSD0011', new_score['user_id'], new_score['score_id'], song_name, new_score['score'], new_score['rank'])\n # Mark the score has processed on transaction.\n result = database.execute_statement(connection, 'l_scores_on_master_I01', user_id, new_score['score_id'], 1, new_score['created_on'])\n new_counter = new_counter + 1\n else:\n # Case of updated or not score.\n master_score = master_score[0]\n master_scores_score = 
master_score['score']\n new_scores_score = new_score['score']\n if new_scores_score > master_scores_score:\n # Case of updated score.\n created_on = new_score['created_on']\n del new_score['created_on']\n del new_score['updated_on']\n new_score.update(beatmap_md5_key=new_score['beatmap_md5'])\n result = database.execute_statement_values(connection, 'm_users_scores_U01', new_score.values())\n beatmap_info = util.get_beatmap_info(new_score['beatmap_md5'], new_score['play_mode'])\n song_name = beatmap_info['song_name']\n log.debug('ORSD0012', new_score['user_id'], new_score['score_id'], song_name, new_score['score'], new_score['rank'])\n # Mark the score has processed on transaction.\n result = database.execute_statement(connection, 'l_scores_on_master_I01', user_id, new_score['score_id'], 2, created_on)\n updated_counter = updated_counter + 1\n else:\n # Case of not updated score.\n beatmap_info = util.get_beatmap_info(new_score['beatmap_md5'], new_score['play_mode'])\n song_name = beatmap_info['song_name']\n log.debug('ORSD0013', new_score['user_id'], new_score['score_id'], song_name, new_score['score'], new_score['rank'])\n # Mark the score has processed on transaction.\n result = database.execute_statement(connection, 'l_scores_on_master_I01', user_id, new_score['score_id'], 3, new_score['created_on'])\n not_updated_counter = not_updated_counter + 1;\n log.info('ORSI0010', new_counter, updated_counter, not_updated_counter)\n" }, { "alpha_fraction": 0.5317585468292236, "alphanum_fraction": 0.5328084230422974, "avg_line_length": 32.421051025390625, "blob_id": "9a26d73f0b8f72b1842cf8d737a55e91cd88fde4", "content_id": "947cb39e671aeab0ce23fd0a5be4a9b078fd11d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1905, "license_type": "no_license", "max_line_length": 150, "num_lines": 57, "path": "/www/score-search.php", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "<?php 
require_once('./functions.php') ?>\n<?php\n if (isset($_GET['score-id'])) {\n $score_id = $_GET['score-id'];\n } else {\n $score_id = null;\n }\n function print_score_id_detail($score_id) {\n if ($score_id != null) {\n $pdo = get_pdo();\n $query = 'SELECT * FROM t_users_scores INNER JOIN m_beatmaps ON t_users_scores.beatmap_md5 = m_beatmaps.beatmap_md5 WHERE score_id = :score_id';\n $statement = $pdo -> prepare($query);\n $statement -> execute([':score_id' => $score_id]);\n $score_data = ($row = $statement -> fetch(PDO::FETCH_ASSOC));\n if ($score_data) {\n $user_id = $score_data['user_id'];\n $score_id = $score_data['score_id'];\n $mods = $score_data['mods'];\n $rank = $score_data['rank'];\n $time = $score_data['time'];\n $accuracy = $score_data['accuracy'];\n $pp = $score_data['pp'];\n print('<table class=\"ui celled table\">');\n print('<thead>');\n foreach ($score_data as $key => $value) {\n print('<tr><td>'.$key.'</td><td>'.$value.'</td></tr>');\n }\n print('</thead>');\n print('</table>');\n } else {\n print('<b>score_id not found.</b>');\n }\n }\n }\n?>\n<html>\n<head>\n <title>ORS Score Search</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"semantic/dist/semantic.min.css\">\n <script src=\"semantic/dist/semantic.min.js\"></script>\n</head>\n<body>\n <div class=\"ui grid\">\n <div class=\"four wide column\"></div>\n <div class=\"eight wide column\">\n <form class=\"ui form\">\n <div class=\"field\">\n <label>score_id</label>\n <input type=\"text\" name=\"score-id\" placeholder=\"Enter score_id\">\n </div>\n <button class=\"ui button\" type=\"submit\">Search</button>\n </form>\n <?php print_score_id_detail($score_id) ?>\n </div>\n <div class=\"four wide column\"></div>\n </div>\n</html>\n" }, { "alpha_fraction": 0.6626794338226318, "alphanum_fraction": 0.6674641370773315, "avg_line_length": 15.720000267028809, "blob_id": "4c760eb5c73b53f5a814724dfc2ca7af2871aba8", "content_id": "d472eee5662bcd68f41fbad90b980626067f6866", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 418, "license_type": "no_license", "max_line_length": 54, "num_lines": 25, "path": "/sql/t_users_activity_S04.sql", "repo_name": "ellsharp/osuRippleStats", "src_encoding": "UTF-8", "text": "SELECT\n score_id,\n beatmap_md5\nFROM\n t_users_activity\nWHERE NOT EXISTS(\n SELECT\n *\n FROM\n m_first_place\n WHERE\n t_users_activity.score_id = m_first_place.score_id\n) AND NOT EXISTS (\n SELECT\n *\n FROM\n l_scores_on_first_place list\n WHERE\n list.score_id = t_users_activity.score_id\n) AND\n t_users_activity.type = 1 AND\n t_users_activity.user_id = %s\nORDER BY\n t_users_activity.score_id\nASC;\n" } ]
79
jazzbob/stb
https://github.com/jazzbob/stb
ba214c5588f9a509a5afd8d91d69a5fddb88a3f9
61d35a8682b59cd904c84f1111dbbf6f00afa6cd
ceb6192944ea5fca6a7e69f0c224847004bdff77
refs/heads/master
2021-01-21T07:08:20.642537
2017-03-03T08:57:02
2017-03-03T08:57:02
83,323,850
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5786387920379639, "alphanum_fraction": 0.596864640712738, "avg_line_length": 30.510040283203125, "blob_id": "89ac237ca1268b38d00cff45980f171c630e61ed", "content_id": "e285f631b6ed5e432f4eb0840d6766eb34d131d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7846, "license_type": "no_license", "max_line_length": 112, "num_lines": 249, "path": "/stb.py", "repo_name": "jazzbob/stb", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.stats import beta\nimport matplotlib.pyplot as plt\nimport copy\nimport seaborn as sns\nimport os\nfrom datetime import datetime\n\n\nclass Belief:\n def __init__(self):\n self.successes = 1\n self.failures = 1\n self.data = []\n\n def update(self, sat):\n if sat:\n self.successes += 1\n else:\n self.failures += 1\n\n def dist(self):\n return beta(self.successes, self.failures)\n\n def sample(self):\n return beta(self.successes, self.failures).rvs()\n\n def std(self):\n return beta(self.successes, self.failures).std()\n\n def mode(self):\n return self.successes / (self.successes + self.failures)\n\n def mean(self):\n return beta(self.successes, self.failures).mean()\n\n def median(self):\n return beta(self.successes, self.failures).median()\n\n def cv(self):\n return self.std() / self.mean()\n\n def ppf(self, q):\n return beta(self.successes, self.failures).ppf(q)\n\n def print_stats(self):\n print('{:.2} {:.2} {:.2} {:.2}'.format(self.std(), self.cv(), self.median(), self.ppf(0.1)))\n\n\nclass Vanilla:\n def __init__(self):\n self.planning_depth = planning_depth\n self.best_plan = None\n self.best_value = 0.\n\n def get_random_plan(self):\n return [['up', 'down', 'left', 'right'][np.random.randint(0, 4)] for _ in range(self.planning_depth)]\n\n def update(self, plan, value):\n if value > self.best_value:\n self.best_plan = plan\n\n\nclass Planner:\n def __init__(self):\n self.planning_depth = planning_depth\n self.safety_belief_dists = 
[self.get_prior_safety_belief() for _ in range(0, self.planning_depth)]\n\n def get_plan(self, function):\n plan = []\n for d in range(0, self.planning_depth):\n values = dict()\n for action, belief in self.safety_belief_dists[d].items():\n values[action] = function(belief)\n # get action with maximum sampled value\n max_action = max(values.keys(), key=(lambda k: values[k]))\n plan.append(max_action)\n return plan\n\n def update_beliefs(self, plan, rewards):\n sat = np.sum(rewards) >= required_reward\n for d, action in enumerate(plan):\n self.safety_belief_dists[d][action].update(sat)\n\n def update_beliefs_markovian(self, plan, rewards):\n inverse_cumulative_rewards = np.cumsum(rewards[::-1])[::-1]\n for d, action in enumerate(plan):\n sat = inverse_cumulative_rewards[d] >= required_reward\n self.safety_belief_dists[d][action].update(sat)\n\n def update_beliefs_per_action(self, plan, rewards):\n for d, action in enumerate(plan):\n self.safety_belief_dists[d][action].update(rewards[d] + 1)\n\n def get_prior_safety_belief(self):\n return dict(right=Belief(), left=Belief(), up=Belief(), down=Belief())\n\n\nclass Agent:\n def __init__(self):\n self.pos = [0, 0]\n self.p_fail = np.random.uniform(0, 1)\n self.actions = dict(right=(1, 0), left=(-1, 0), up=(0, 1), down=(0, -1))\n\n def update(self, action):\n action = self.actions[action]\n if np.random.uniform(0, 1) <= self.p_fail:\n action = (action[0] * -1, action[1] * -1)\n self.pos[0] += action[0]\n self.pos[1] += action[1]\n\n\nclass World:\n def __init__(self, x_dim=10, y_dim=10):\n self.grid = np.random.rand(x_dim, y_dim)\n self.init_grid()\n self.agent = Agent()\n\n def init_grid(self):\n for x in range(len(self.grid)):\n for y in range(len(self.grid)):\n self.grid[x, y] = np.random.choice([0, -1], p=[0.8, 0.2])\n\n def update(self, action):\n self.agent.update(action)\n self.agent.pos[0] = np.clip(self.agent.pos[0], 0, 9)\n self.agent.pos[1] = np.clip(self.agent.pos[1], 0, 9)\n reward = 
self.grid[self.agent.pos[0], self.agent.pos[1]]\n return reward\n\n def execute_plan(self, plan):\n rewards = []\n for action in plan:\n r = self.update(action)\n rewards.append(r)\n return rewards\n\n\ndef estimate_satisfaction_probability(world, plan):\n plan_sats = []\n for j in range(0, 1000):\n world_copy = copy.deepcopy(world)\n rewards = world_copy.execute_plan(plan)\n sat = np.sum(rewards) >= required_reward\n plan_sats.append(sat)\n return np.mean(plan_sats)\n\n\ndirectory = \"plots/stp/{}\".format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nnp.random.seed(424242)\nplt.ion()\nplt.show()\n\nplanning_depth = 10\nrequired_reward = -2\n\nsats = []\nestimated_sats = []\nestimated_sats_best = []\nsampled_tails = []\nbest_plan_lower_tails = []\nbest_plan_upper_tails = []\ncvs = []\nmodes = []\nbest_cvs = []\nbest_modes = []\n\nworld = World()\nprint('p fail {}'.format(world.agent.p_fail))\n\nvanilla = Vanilla()\nmax_p = 0.\nfor i in range(100):\n print('vanilla', i, end=\"\\r\")\n plan = vanilla.get_random_plan()\n p = estimate_satisfaction_probability(world, plan)\n vanilla.update(plan, p)\n if p > max_p:\n max_p = p\n\nplanner = Planner()\nfor i in range(0, 10000):\n plan = planner.get_plan(Belief.sample)\n\n world_copy = copy.deepcopy(world)\n rewards = world_copy.execute_plan(plan)\n\n sat = np.sum(rewards) >= required_reward\n sats.append(sat)\n\n planner.update_beliefs(plan, rewards)\n # planner.update_beliefs_markovian(plan, rewards)\n # planner.update_beliefs_per_action(plan, rewards)\n\n estimated_sats.append(estimate_satisfaction_probability(world, plan))\n mode = np.mean([planner.safety_belief_dists[i][a].mode() for i, a in enumerate(plan)])\n modes.append(mode)\n cv = np.mean([planner.safety_belief_dists[i][a].std() / planner.safety_belief_dists[i][a].mode() for i, a in\n enumerate(plan)])\n cvs.append(cv)\n\n best_plan = planner.get_plan(Belief.mode)\n 
estimated_sats_best.append(estimate_satisfaction_probability(world, best_plan))\n best_mode = np.mean([planner.safety_belief_dists[i][a].mode() for i, a in enumerate(best_plan)])\n best_modes.append(best_mode)\n cv = [planner.safety_belief_dists[i][a].std() / planner.safety_belief_dists[i][a].mode() for i, a in\n enumerate(best_plan)]\n cv = np.mean(cv)\n best_cvs.append(cv)\n\n sampled_tails.append(\n np.min([planner.safety_belief_dists[d][action].ppf(0.1) for d, action in enumerate(plan)]))\n\n best_plan_lower_tails.append(planner.safety_belief_dists[0][best_plan[0]].ppf(0.1))\n best_plan_upper_tails.append(planner.safety_belief_dists[0][best_plan[0]].ppf(0.9))\n\n if i % 10 == 0:\n\n plt.figure(1)\n plt.clf()\n\n plt.xlim((-1, len(estimated_sats)))\n\n plt.scatter(range(len(estimated_sats)), estimated_sats, color=(1, 0.5, 0, 0.5),\n label='$\\hat{\\mathrm{P}}$(sat)')\n plt.scatter(range(len(estimated_sats_best)), estimated_sats_best, color=(0, 0.5, 1, 0.5),\n label='$\\hat{\\mathrm{P}}^*$(sat)')\n plt.axhline(max_p, ls='--')\n plt.legend(loc='best', fancybox=False, framealpha=0.5)\n plt.savefig('{}/estimation.png'.format(directory))\n plt.pause(0.001)\n\n plt.figure(2)\n plt.clf()\n plt.xlim((0, len(modes)))\n plt.plot(range(len(modes)), modes, color=(1, 0.5, 0, 1), ls=\"--\", label=\"sampled avg. mode\")\n plt.plot(range(len(best_modes)), best_modes, color=(0, 0.5, 1, 1), ls=\"--\", label=\"best avg. mode\")\n plt.plot(range(len(cvs)), cvs, color=(1, 0.5, 0, 1), label=\"sampled avg. CV\")\n plt.plot(range(len(best_cvs)), best_cvs, color=(0, 0.5, 1, 1), label=\"best avg. 
CV\")\n plt.legend(loc='best')\n plt.savefig('{}/cvs.png'.format(directory))\n plt.pause(0.001)\n\n\nplt.ioff()\nplt.show()\n" }, { "alpha_fraction": 0.8104265332221985, "alphanum_fraction": 0.8246445655822754, "avg_line_length": 125.80000305175781, "blob_id": "f85307f5ee135ef923eeac86cac4dcc40f8c1515", "content_id": "075dc033169fc1ffc8265cdee81df9b418bb037a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 633, "license_type": "no_license", "max_line_length": 551, "num_lines": 5, "path": "/README.md", "repo_name": "jazzbob/stb", "src_encoding": "UTF-8", "text": "# Stacked Thompson Bandits\n\nWe introduce Stacked Thompson Bandits (STB) for efficiently generating plans that are likely to satisfy a given bounded temporal logic requirement. STB uses a simulation for evaluation of plans, and takes a Bayesian approach to using the resulting information to guide its search. In particular, we show that stacking multiarmed bandits and using Thompson sampling to guide the action selection process for each bandit enables STB to generate plans that satisfy requirements with a high probability while only searching a fraction of the search space.\n\nPaper on arXiv: https://arxiv.org/pdf/1702.08726.pdf" } ]
2
BitlyTwiser/Down-Detector
https://github.com/BitlyTwiser/Down-Detector
e3e94a80f213127ba05ed7ffab57478338c43e6c
8925846a2455268a7190137ec1d6a00c0cf31f2e
a41827f0c14a772cf1fc5d777de90a2ec4470529
refs/heads/master
2020-05-30T19:27:38.636723
2019-06-28T03:50:50
2019-06-28T03:50:50
189,925,507
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41104793548583984, "alphanum_fraction": 0.41429731249809265, "avg_line_length": 33.19444274902344, "blob_id": "4f785c337a63ca24fc5651e677e271ba232d1046", "content_id": "26f446d877b962c7183d8f1684882182681bed37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1231, "license_type": "no_license", "max_line_length": 91, "num_lines": 36, "path": "/README.md", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": "```\n _____ _____ _ _ \n | __ \\ | __ \\ | | | | \n | | | | _____ ___ __ ______| | | | ___| |_ ___ ___| |_ ___ _ __ \n | | | |/ _ \\ \\ /\\ / / '_ \\______| | | |/ _ \\ __/ _ \\/ __| __/ _ \\| '__|\n | |__| | (_) \\ V V /| | | | | |__| | __/ || __/ (__| || (_) | | \n |_____/ \\___/ \\_/\\_/ |_| |_| |_____/ \\___|\\__\\___|\\___|\\__\\___/|_| \n \n\n\\\\ Curated by: Josh Groeschl\n\\\\ Maintained By: Josh Groeschl\n\n```\n# Down-Detector\n\n## Premise: \n- CLI utility tool to detect if websites are responding to HTTP/HTTPS requests.\n- Will receive single URL's or a CSV file of URL's\n- Performs basic URL parsing to validate correctness of a URL before attempting connection.\n\n## Usage:\n- Build/install dependencies\n```python setup.py install```\n- run program:\n```downdetect --url https://google.com```\n- Or run the program with a CSV list of URL's:\n```downdetect --csv_file <Csv_File_Location>```\n- run program with a counter: (10 checks)\n- Note: This feature is NOT available for CSV.\n```downdetect --url google.com --c 10```\n\n## Dependencies:\n- click\n- colorama\n- requests\n- structlog\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 11.615385055541992, "blob_id": "940bea3fe4f17ca7ffc61a7b50143ac15cf27202", "content_id": "5566b66f8d853c565f892fd53981254d83a07ef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": 
"no_license", "max_line_length": 40, "num_lines": 13, "path": "/src/downdetect/__init__.py", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": "import click\n\ntry:\n from . import down\nexcept ImportError:\n import down\n\[email protected]()\ndef downdetect():\n pass\n\n\ndowndetect.add_command(down.down_detect)\n" }, { "alpha_fraction": 0.5759385824203491, "alphanum_fraction": 0.5767918229103088, "avg_line_length": 23.93617057800293, "blob_id": "a230f2a1a739374fa340beb8c9c84e36d483d95a", "content_id": "4efd1edf8cdf362e97d919af6d95a61a614f0526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 75, "num_lines": 47, "path": "/tests/test_csv_parse.py", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\n# Testing file for CSV parsing and testing URL's\nimport csv\nfrom urllib.parse import urlparse\nfrom colorama import Fore, Style\n\n\ndef __url_validator(url):\n \"\"\"\n Validates that the URL's passed in appear to be valid.\n \"\"\"\n parsed = urlparse(url)\n if parsed.scheme == '' or parsed.scheme is None:\n print(Fore.RED + 'It appers there is not scheme given to this URL,'\n 'we will only attempt HTTPS, '\n 'if you desire HTTP please provide this with the URL.')\n return {f'https://{parsed.path}'}\n elif parsed.scheme == 'https':\n pass\n elif parsed.scheme == 'http':\n pass\n else:\n print(Fore.RED + f'It appears that the given scheme '\n '{parsed.scheme} is not HTTPS, HTTP, nor Null.. 
'\n 'This URL has been added to an ignore list and will not'\n 'be checked')\n \n\n\ndef parse(csv_path):\n with open(csv_path, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n __url_validator(row)\n yield \n\ndef check():\n pass\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5659509301185608, "alphanum_fraction": 0.5950919985771179, "avg_line_length": 23.148147583007812, "blob_id": "e7e89f1cfbe4f7115f6c13af8cc5a373fd6be8e2", "content_id": "87d33d6fc608c0e18171affe82a8c53eaf144449", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 98, "num_lines": 27, "path": "/setup.py", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\ninstall_requires = [\n 'click==7.0',\n 'requests==2.22.0',\n 'colorama==0.4.1',\n 'bumpversion==0.5.3',\n 'structlog==19.1.0'\n]\n\nsetup(\n name='downdetect',\n version='0.0.1',\n author='Joshua Groeschl',\n author_email='[email protected]',\n description='A utility tool for detecting if a website is responding to HTTP/HTTPS requests.',\n license='MIT',\n py_modules=['down'],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'downdetect = downdetect.down:down_detect',\n ]\n },\n package_dir={'': 'src'},\n packages=find_packages('src')\n)\n" }, { "alpha_fraction": 0.5061728358268738, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 15.199999809265137, "blob_id": "88c7dc4312ec845f94007bdce81162a61915b422", "content_id": "23e7ef97dec8d08b57e9a443923ee63c4fcee117", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 81, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/requirements.txt", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": 
"requests==2.22.0\ncolorama==0.4.1\nclick==7.0\nbumpversion==0.5.3\nstructlog==19.1.0\n" }, { "alpha_fraction": 0.5416274666786194, "alphanum_fraction": 0.5446848273277283, "avg_line_length": 35.97391128540039, "blob_id": "53f48ff2d0a3a56dfe478a55af8d64947f9c410d", "content_id": "01077d5d19c6ad88daf3778c96b8be809e93715b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4252, "license_type": "no_license", "max_line_length": 160, "num_lines": 115, "path": "/src/downdetect/down.py", "repo_name": "BitlyTwiser/Down-Detector", "src_encoding": "UTF-8", "text": "# TODO: Add a counter of times you want to test the site\n\nimport requests\nimport click\nimport csv\nimport os\nfrom colorama import Fore, init, Style\nfrom urllib.parse import urlparse\n\n\n# Globally set the init for colorama to avoid having to perform Style.\n# # RESET_ALL after every line color.\ninit(autoreset=True) # Set Auto-Reset to True.\n\n\nclass DownDetector():\n\n def __init__(self):\n self.ignore = []\n self.good = u'\\u2713'\n self.bad = u'\\u0078'\n\n def parse_csv(self, csv_loc):\n \"\"\"\n Method to parse the given CSV file.\n ----\n Yields a generator of URLS to test.\n ----\n CSV location will be given via the CLI argument --CSV\n \"\"\"\n valid_urls = []\n if os.path.isfile(csv_loc):\n with open(csv_loc, 'r') as csv_contents:\n for row in csv_contents:\n row = str(row).rstrip('\\n').rstrip(',')\n if self.url_validator(row) is False:\n continue\n else:\n valid_urls.append(row)\n if valid_urls != []:\n return valid_urls\n else:\n return False\n else:\n print(Fore.RED + f'The provided file does not seem to exist, Please check the path give. 
Path:{csv_loc}')\n\n def detect(self, url):\n \"\"\"\n Method used to detect if an endpoints is unable to be reached.\n \"\"\"\n for u in url:\n try:\n print(f'Testing Site: {u}')\n requests.get(str(u))\n except ConnectionError:\n print(Fore.RED + f'{self.bad}' + Style.RESET_ALL +\n ': Error Connecting to Website!\\n')\n except:\n print(Fore.RED + f'{self.bad}' + Style.RESET_ALL + ': Error Connecting to Website!\\n')\n else:\n print(Fore.GREEN + f'{self.good}' + Style.RESET_ALL + ': '\n 'Successful Connection! The site appears to be up.\\n')\n\n def url_validator(self, url):\n \"\"\"\n Validates that the URL's passed in appear to be at least semi valid.\n ----\n Note: This should be a list, if len is GT 1 loop here instead of else where in the code.\n \"\"\"\n parsed = urlparse(url)\n if parsed.scheme == '' or parsed.scheme is None:\n print(Fore.RED + f'It appers there is not scheme given to this URL {parsed.path},'\n 'we will only attempt HTTPS, '\n 'if you desire HTTP please provide this with the URL.\\n')\n return f'https://{parsed.path}'\n elif parsed.scheme == 'https':\n return parsed.geturl()\n elif parsed.scheme == 'http':\n return parsed.geturl()\n else:\n print(Fore.RED + 'It appears that the given scheme '\n f'{parsed.scheme} for url {parsed.geturl()} is not HTTPS, HTTP, nor Null.. '\n 'This URL has been added to an ignore list and will not '\n 'be checked\\n')\n pass\n\[email protected]()\[email protected]('--csv_file', default=False, help='Command Line Argument to add a CSV file of URLs.')\[email protected]('--url', default=False, help='Command Line Argument allowing one to pass in a single URL.')\[email protected]('--c', default=1, help='Accepts an Int value. If this flag is added the URL will be checked X number of times. 
(Note: Only works for single URL)')\ndef down_detect(csv_file, url, c):\n \"\"\"\n Function to tie all components of the DownDetector\n\n class together and CLI build.\n \"\"\"\n detect = DownDetector()\n if csv_file: \n urls = detect.parse_csv(csv_file)\n if urls:\n detect.detect(list(urls))\n else:\n print(Fore.RED + 'It appares that there were not valid URLs, '\n 'please check the given csv')\n if url:\n try:\n c = int(c)\n except ValueError:\n print(Fore.RED + 'This appears to not be an integer value.. '\n 'please enter a base10 value')\n else:\n urls = detect.url_validator(url=url)\n if urls:\n for count in range(0, c):\n detect.detect([urls])\n" } ]
6
Akenaide/timon
https://github.com/Akenaide/timon
01fde90f8389be6e02e06b56a1ad64879393e4b8
8d2c94eba4c7862b51753ac920725f0c2c12112a
9c4ed44bc2be274a910e351d7db2777d064c957a
refs/heads/master
2020-03-27T00:55:32.552237
2018-04-23T12:15:42
2018-04-23T12:15:42
145,667,383
0
0
BSD-3-Clause
2018-08-22T06:40:05
2018-04-29T18:19:12
2018-04-29T18:19:17
null
[ { "alpha_fraction": 0.7448979616165161, "alphanum_fraction": 0.7448979616165161, "avg_line_length": 23.5, "blob_id": "a16725bd799eb1fbfba68595983d2c747885c886", "content_id": "9d0286c8ce8b1e1918f64b8590f376eaf64f50c2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "permissive", "max_line_length": 33, "num_lines": 4, "path": "/timon/benchmarks/none.py", "repo_name": "Akenaide/timon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# script that does almost nothing\nimport sys\nprint(len(sys.modules.keys()))\n" }, { "alpha_fraction": 0.5746197700500488, "alphanum_fraction": 0.5831748843193054, "avg_line_length": 24.349397659301758, "blob_id": "b4164445d0fa23850f172f26cef376c47af00cc7", "content_id": "cb14b0ec1e0411eda10ab45deab356aceb9d4a20", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2104, "license_type": "permissive", "max_line_length": 77, "num_lines": 83, "path": "/timon/scripts/isup.py", "repo_name": "Akenaide/timon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport requests\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n\nfrom .flags import FLAG_MAP\n\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n\ndef isup(url, timeout=10, verify_ssl=True, cert=None):\n error = False\n error_msg = \"\"\n try:\n resp = requests.get(url, timeout=10, verify=verify_ssl, cert=cert)\n except Exception as exc:\n error = True\n error_msg = repr(exc)\n if error:\n status = \"ERROR\"\n print(status, error_msg)\n else:\n s_code = resp.status_code\n status = \"OK\" if s_code in [200] else \"ERROR\"\n print(status, resp.status_code)\n return status\n\n\ndef mk_parser():\n import argparse # noqa\n description = \"checks whether a web server is up\"\n parser = argparse.ArgumentParser(description=description)\n 
parser.add_argument(\n \"--verify_ssl\",\n default=\"True\",\n help=\"True to verify SSL. False to not check SSL (default=True)\")\n parser.add_argument(\n \"--key\",\n help=\"file name of client cert's key\")\n parser.add_argument(\n \"--cert\",\n help=\"file name of client cert\")\n parser.add_argument(\n \"host_url\",\n help=\"host's url\")\n return parser\n\n\ndef main():\n args = sys.argv[1:]\n if len(args) > 1 or \"-h\" in args or \"--help\" in args:\n parser = mk_parser()\n options = parser.parse_args(args)\n host_url = options.host_url\n else:\n options = None\n host_url = args[0]\n\n error = False\n error_msg = \"\"\n status = \"UNKNOWN\"\n if options is None:\n status = isup(host_url, timeout=10)\n else:\n verify_ssl = options.verify_ssl[0].lower() in \"ty1\"\n if verify_ssl:\n cert = (options.cert, options.key)\n else:\n cert = None\n status = isup(host_url, timeout=10, verify_ssl=verify_ssl, cert=cert)\n\n if error:\n status = \"ERROR\"\n print(status, error_msg)\n exit(FLAG_MAP[status])\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 22.294116973876953, "blob_id": "520b8a81477a7024b6f5d551578a3f19d1db24c5", "content_id": "2bdcf775989af6655bdbcd1e9dbe45c53b3d10a2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "permissive", "max_line_length": 59, "num_lines": 17, "path": "/timon/probe_if.py", "repo_name": "Akenaide/timon", "src_encoding": "UTF-8", "text": "from minibelt import import_from_path\n\n# cache of probes\nprobes = {}\n\n\ndef get_probe_cls(cls_name):\n \"\"\" cache for probe modules \"\"\"\n probes[cls_name] = probe = (\n probes.get(cls_name) or import_from_path(cls_name))\n return probe\n\n\ndef mk_probe(cls_name, *args, **kwargs):\n \"\"\" creates a probe instance \"\"\"\n probe = get_probe_cls(cls_name)\n return probe(*args, **kwargs)\n" }, { 
"alpha_fraction": 0.585185170173645, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 11.090909004211426, "blob_id": "8e2bd4466fbe8d673fbe749da678a2d74d25a850", "content_id": "7b15f7db4c91209b5a8672c20c20f8a244bb4a0f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 135, "license_type": "permissive", "max_line_length": 27, "num_lines": 11, "path": "/tox.ini", "repo_name": "Akenaide/timon", "src_encoding": "UTF-8", "text": "[tox]\nenvlist=py35\n py36\n pypy3\n\n\n[testenv]\ndeps =\n -rrequirements.txt \n -rrequirements/test.txt\ncommands=pytest\n\n\n" } ]
4
dtnewman/zappa_boilerplate
https://github.com/dtnewman/zappa_boilerplate
508164ad929cf5e8bd5718ce8253890cc981a009
f32906e4fc0b1b8519bdf9e7e2f5c1ff1c2182d4
fd7bbc6caffac0775396e8d99183f7fcd9c0cf2f
refs/heads/master
2023-05-25T13:57:57.084524
2023-05-16T17:07:46
2023-05-16T17:07:46
79,526,600
17
1
MIT
2017-01-20T04:55:19
2021-01-17T19:26:19
2023-05-01T21:36:57
Python
[ { "alpha_fraction": 0.5797950029373169, "alphanum_fraction": 0.5918334126472473, "avg_line_length": 32.221622467041016, "blob_id": "07e312826e948b7f5326ce12af820b3ef3e9582b", "content_id": "3734a2eb84b5074881dfc2e2e89f0f131fda6560", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6147, "license_type": "permissive", "max_line_length": 101, "num_lines": 185, "path": "/zappa_boilerplate/public/views_tests.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport mock\n\nfrom zappa_boilerplate.test_utils import BaseTestCase\n\n\nclass TestViews(BaseTestCase):\n\n def test_status(self):\n expected = {'status': 'ok'}\n response = self.client.get('/status')\n self.assert200(response)\n self.assertEqual(response.json, expected)\n\n def test_about(self):\n response = self.client.get('/about')\n self.assert200(response)\n\n def test_home_get(self):\n response = self.client.get('/')\n self.assert200(response)\n\n def test_register_get(self):\n response = self.client.get('/register')\n self.assert200(response)\n\n @mock.patch(\"zappa_boilerplate.public.views.flash\") # mocks out the calls to flash in views.py\n @mock.patch(\"zappa_boilerplate.utils.flash\") # mocks out the calls to flash (errors) in utils.py\n def test_register_and_login(self, mock_flash_errors, mock_flash):\n username = 'foo'\n email = '[email protected]'\n password = 'foobar'\n\n register_form_data = {\n 'username': username,\n 'email': email,\n 'password': password,\n 'confirm': password\n }\n\n response = self.client.post('/register', data=register_form_data, follow_redirects=True)\n self.assert200(response)\n\n flash_calls = []\n login_form_data = {\n 'username': username,\n 'password': 'wrong_password'\n }\n\n response = self.client.post('/', data=login_form_data, follow_redirects=True)\n self.assert200(response)\n flash_calls.append(mock.call('Password - Invalid password', 
'warning'))\n\n login_form_data = {\n 'username': 'unknown_username',\n 'password': password\n }\n\n response = self.client.post('/', data=login_form_data, follow_redirects=True)\n self.assert200(response)\n flash_calls.append(mock.call('Username - Unknown username', 'warning'))\n\n mock_flash_errors.assert_has_calls(flash_calls)\n\n login_form_data = {\n 'username': username,\n 'password': password\n }\n\n response = self.client.post('/', data=login_form_data, follow_redirects=True)\n self.assert200(response)\n mock_flash.assert_called_with('You are logged in.', 'success')\n\n @mock.patch(\"zappa_boilerplate.utils.flash\")\n def test_login_form_validation_error(self, mock_flash):\n username = 'foo'\n\n form_data = {\n 'username': username,\n # 'password': 'password', <-- leaving this out will cause a form validation error\n }\n\n response = self.client.post('/', data=form_data, follow_redirects=True)\n self.assert200(response)\n mock_flash.assert_called_with('Password - This field is required.', 'warning')\n\n @mock.patch(\"zappa_boilerplate.utils.flash\")\n def test_register_error(self, mock_flash):\n username = 'foo'\n email = '[email protected]'\n password = 'foobar'\n\n form_data = {\n 'username': username,\n 'email': email,\n 'password': password,\n # 'confirm': password <-- leaving this out will cause a form verification error\n }\n\n response = self.client.post('/register', data=form_data, follow_redirects=True)\n self.assert200(response)\n mock_flash.assert_called_with('Verify password - This field is required.', 'warning')\n\n @mock.patch(\"zappa_boilerplate.utils.flash\")\n def test_register_username_twice(self, mock_flash):\n username = 'foo'\n email1 = '[email protected]'\n email2 = '[email protected]'\n password = 'foobar'\n\n form_data = {\n 'username': username,\n 'email': email1,\n 'password': password,\n 'confirm': password\n }\n\n response = self.client.post('/register', data=form_data, follow_redirects=True)\n self.assert200(response)\n\n 
form_data['email'] = email2\n response = self.client.post('/register', data=form_data, follow_redirects=True)\n self.assert200(response)\n mock_flash.assert_called_with('Username - Username already registered', 'warning')\n\n @mock.patch(\"zappa_boilerplate.utils.flash\")\n def test_register_email_twice(self, mock_flash):\n username1 = 'foo1'\n username2 = 'foo2'\n email = '[email protected]'\n password = 'foobar'\n\n form_data = {\n 'username': username1,\n 'email': email,\n 'password': password,\n 'confirm': password\n }\n\n response = self.client.post('/register', data=form_data, follow_redirects=True)\n self.assert200(response)\n\n form_data['username'] = username2\n response = self.client.post('/register', data=form_data, follow_redirects=True)\n self.assert200(response)\n mock_flash.assert_called_with('Email - Email already registered', 'warning')\n\n def test_not_found_error(self):\n response = self.client.get('/invalid_url')\n self.assert404(response)\n\n def test_logout(self):\n email = '[email protected]'\n username = 'foofoo'\n password = 'barbar'\n\n # register and login first\n register_form_data = {\n 'username': username,\n 'email': email,\n 'password': password,\n 'confirm': password\n }\n\n response = self.client.post('/register', data=register_form_data, follow_redirects=True)\n self.assert200(response)\n\n login_form_data = {\n 'username': username,\n 'password': password\n }\n\n # test that view redirects\n response = self.client.post('/', data=login_form_data, follow_redirects=True)\n self.assert200(response)\n\n response = self.client.get('/logout')\n self.assertStatus(response, 302)\n\n # login again and test that after redirection, returns 200\n response = self.client.post('/', data=login_form_data, follow_redirects=True)\n self.assert200(response)\n\n response = self.client.get('/logout', follow_redirects=True)\n self.assert200(response)\n\n" }, { "alpha_fraction": 0.7788461446762085, "alphanum_fraction": 0.7788461446762085, 
"avg_line_length": 33.66666793823242, "blob_id": "0441561dd67014237b8415ee2e7821de7c4e2c75", "content_id": "e8556fd4908a7fab0a228d3343a2a38d05f33ffd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 104, "license_type": "permissive", "max_line_length": 74, "num_lines": 3, "path": "/.coveragerc", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# Settings for the coverage package (which handles code coverage in tests)\n[report]\nshow_missing = True\n" }, { "alpha_fraction": 0.8253968358039856, "alphanum_fraction": 0.8253968358039856, "avg_line_length": 30.5, "blob_id": "1d6b4fa5b96d2313b986f63e8d6bcdc92da701be", "content_id": "5e0bea87bc4d1b6c9d66699650da711d95b8f740", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "permissive", "max_line_length": 36, "num_lines": 2, "path": "/zappa_boilerplate/models.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# import your models here\nimport zappa_boilerplate.user.models\n" }, { "alpha_fraction": 0.7382664680480957, "alphanum_fraction": 0.7431869506835938, "avg_line_length": 43.025001525878906, "blob_id": "e1a52d8ea23f7fe8b918005298acc8a53969713b", "content_id": "1af49767b23476d001fe1952fab42509ffde1305", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5284, "license_type": "permissive", "max_line_length": 513, "num_lines": 120, "path": "/README.md", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "\nZappa Boilerplate\n===================\n\n[![Coverage Status](https://coveralls.io/repos/github/dtnewman/zappa_boilerplate/badge.svg?branch=master)](https://coveralls.io/github/dtnewman/zappa_boilerplate?branch=master)\n\nThis repo is meant to demonstrate how to setup a **serverless** web application using 
[Flask](http://flask.pocoo.org/) and [Zappa](https://github.com/Miserlou/Zappa). The code here is a \"boilerplate\" template for a simple web application with a Postgres database that is meant to be a starting point for more complex projects.\n\nPython Version\n--------------\nThis project is supported on Python version 3.6. I am no longer maintaining support for 2.7.\n\nQuickstart\n----------\n\n**Step 1:** Clone the repo and install requirements (you probably want to do this inside of a [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/) with a name like *zappa_boilerplate_venv*):\n\n```\n$ git clone [email protected]:dtnewman/zappa_boilerplate.git\n$ cd zappa_boilerplate\n$ pip install -r requirements.txt\n$ pip install -r requirements_test.txt # requirements for running tests\n```\n\n**Step 2:** Create local and local test databases:\n\n```\n$ psql -c 'create database zappa_boilerplate;'\n$ psql -c 'create database zappa_boilerplate_test;'\n```\n\n**Step 3:** Setup the local database\n\nThis repo uses flask-Migrate to handle database migrations. 
The following commands will setup the initial database: \n\n```\n$ python manage.py db init # this will add a migrations folder to your application\n$ python manage.py db migrate # run initial migrations\n$ python manage.py db upgrade # apply initial migrations to the database\n```\n\nSee the [flask-Migrate documentation](https://flask-migrate.readthedocs.io/en/latest/) for more details information on this step.\n\n**Step 4:** Run the application on a local server:\n\n```\n$ python manage.py runserver\n```\n\nThen go to [http://localhost:5000/](http://localhost:5000/) in your browser to test out the application running locally.\n\n**Step 5:** Run tests: \n \n```\n$ python manage.py test\n```\n\nYou can also run tests directly with [nose](http://nose.readthedocs.io):\n\n```\n$ nosetests\n```\n\n\n**Step 6:** Deploy to AWS using Zappa:\n\n*Before you begin, make sure you have a valid AWS account and your [AWS credentials file](https://aws.amazon.com/blogs/security/a-new-and-standardized-way-to-manage-credentials-in-the-aws-sdks/) is properly installed.*\n\nFirst you will need to setup a hosted Postgres database for persistent storage. [ElephantSQL](https://www.elephantsql.com/) and [Heroku](https://www.heroku.com/postgres) both have free tiers for setting up databases with fairly small size limits (but enough to get started). Once you have that setup, you will need to get the database connection string (typically in the format *<postgresql://user:secret@host/db_name>*) from the hosting service.\n\nNext you will need to setup a bucket with a name of your choice on S3 (I used \"zappa-boilerplate-example-bucket\" for mine. You'll need to choose a bucket name that isn't taken). 
This can be done with the [AWS command line interface](https://aws.amazon.com/cli/) with the following command:\n\n```\n$ aws s3 mb s3://your-bucket-name\n```\n\nYou most likely don't want to commit the database connection string to source control (if you do, you can just modify the SQLALCHEMY_DATABASE_URI values in settings.py). So we will use environment variables to set this value. To do this, we generate a file called \"config_secrets.json\" and upload it to our S3 bucket. The \"remote_env\" value in zappa_settings.json will tell Zappa to grab environment variables (that we don't want to commit) from that file. We can generate the file and upload it to S3 as follows:\n\n```\n$ echo '{ \"DB_CONNECTION_STRING\": \"<YOUR SUPER SECRET DB CONNECTION STRING GOES HERE>\" }' > config_secrets.json\n$ aws s3 cp config_secrets.json s3://your-bucket-name\n```\n\nYou can delete the secrets file after you upload it if you wish (although it is in .gitignore to stop it from being committed accidentally).\n\nLater, you can expand this file with other environment variables for things that shouldn't be committed to source control.\n\nNow, we just need to update the file zappa_settings.json with your newly created bucket name. Simply change the strings in the fields \"s3_bucket\" and \"remote_env\" to match your bucket name.\n\nYou are now ready to deploy! To do so, simply run the command:\n\n```\n$ zappa deploy \n```\n\nWhen the process finishes, it will give you the URL of your newly deployed application!\n\n\nMaking Changes \n--------------\nIf you wish to deploy new changes to your repo simply update the code and then run:\n\n```\n$ zappa update \n```\n\nUndeploy\n--------\nUndeploying your project is simple. Just run the following command:\n```\n$ zappa undeploy \n```\n\nAdvanced Features\n-----------------\nThis project is meant to be a simple boilerplate template to get you started, but Zappa has a bunch of cool features that I did not touch in this repo. 
Checkout the [Zappa](https://github.com/Miserlou/Zappa) repository for more info.\n\n\nAcknowledgements\n----------------\nThe structure of this code borrows heavily from the [cookiecutter-flask](https://github.com/sloria/cookiecutter-flask) repo. And of course, it was inspired by the [Zappa](https://github.com/Miserlou/Zappa) project.\n" }, { "alpha_fraction": 0.5214521288871765, "alphanum_fraction": 0.6963696479797363, "avg_line_length": 16.823530197143555, "blob_id": "1a0bba434cb5eddf13e3cb59ab6faa79d5aafa49", "content_id": "8f2b8eb997620823cc90e5c6b4a076660ed8fd27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 303, "license_type": "permissive", "max_line_length": 23, "num_lines": 17, "path": "/requirements.txt", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "alembic==1.4.3\nbcrypt==3.2.0\ncssmin==0.2.0\nemail-validator==1.1.2\nFlask==1.1.2\nFlask-Assets==0.12\nFlask-Login==0.4.1\nFlask-Migrate==2.5.3\nFlask-Script==2.0.6\nFlask-SQLAlchemy==2.4.4\nFlask-WTF==0.14.3\njsmin==2.2.2\npsycopg2-binary==2.8.6\npython-dateutil==2.8.1\nwebassets==2.0\nWTForms==2.3.3\nzappa==0.52.0\n" }, { "alpha_fraction": 0.63705974817276, "alphanum_fraction": 0.6385911107063293, "avg_line_length": 21.517240524291992, "blob_id": "a2bdd034ea068126858b9c4d9b9467c295d1e555", "content_id": "c1f82002c7ad3341db7204aea8755b14205ef0b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "permissive", "max_line_length": 67, "num_lines": 29, "path": "/zappa_boilerplate/test_utils.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Helper utilities for testing.\"\"\"\n\nfrom flask_testing import TestCase\n\nfrom zappa_boilerplate import settings\nfrom zappa_boilerplate.app import create_app\nfrom zappa_boilerplate.database import db_session, init_db, 
drop_db\n\nAPP = None\n\n\nclass BaseTestCase(TestCase):\n\n def create_app(self):\n global APP\n if APP is None:\n APP = create_app(config_object=settings.Test)\n return APP\n\n def setUp(self):\n self.app = self.create_app()\n self.session = db_session\n init_db()\n\n def tearDown(self):\n self.session.close()\n drop_db()\n self.session.remove()\n" }, { "alpha_fraction": 0.6395833492279053, "alphanum_fraction": 0.6416666507720947, "avg_line_length": 19.869565963745117, "blob_id": "309ba16c880ef30dfa82f86d6b90e7adf2026cf4", "content_id": "973f224c2feb1495b87f7856f19de4819fa16a67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "permissive", "max_line_length": 44, "num_lines": 23, "path": "/zappa_boilerplate/assets.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Application assets.\"\"\"\nfrom flask_assets import Bundle, Environment\n\ncss = Bundle(\n 'libs/bootstrap/dist/css/bootstrap.css',\n 'css/*.css',\n filters='cssmin',\n output='public/css/common.css'\n)\n\njs = Bundle(\n 'libs/jQuery/dist/jquery.js',\n 'libs/bootstrap/dist/js/bootstrap.js',\n 'js/*.js',\n filters='jsmin',\n output='public/js/common.js'\n)\n\nassets = Environment()\n\nassets.register('js_all', js)\nassets.register('css_all', css)\n" }, { "alpha_fraction": 0.6718851923942566, "alphanum_fraction": 0.6810176372528076, "avg_line_length": 31.617021560668945, "blob_id": "f8eb1abfc047d917028a021f7264e53008f793d8", "content_id": "bf79c8acc05bfd43343ac1350f8c7fc424d5f5fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "permissive", "max_line_length": 124, "num_lines": 47, "path": "/zappa_boilerplate/settings.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\nos_env = os.environ\n\n\nclass 
Config(object):\n SECRET_KEY = os_env.get('SECRET_KEY', 'secret-key') # used for csrf TODO: Change me\n APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n BCRYPT_LOG_ROUNDS = 12\n ASSETS_DEBUG = False\n DEBUG_TB_ENABLED = False # Disable Debug toolbar\n DEBUG_TB_INTERCEPT_REDIRECTS = False\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass Local(Config):\n \"\"\"Local configuration.\"\"\"\n ENV = 'lcl'\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DB_CONNECTION_STRING\") or 'postgresql://localhost:5432/zappa_boilerplate'\n\n\nclass Development(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DB_CONNECTION_STRING\") or '<DEVELOPMENT DATABASE HERE>' # TODO: Change me\n FLASKS3_ACTIVE = True\n FLASKS3_DEBUG = True\n\n\nclass Production(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DB_CONNECTION_STRING\") or '<PRODUCTION DATABASE HERE>' # TODO: Change me\n DEBUG_TB_ENABLED = False # Disable Debug toolbar\n\n\nclass Test(Config):\n TESTING = True\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DB_CONNECTION_STRING\") or 'postgresql://localhost:5432/zappa_boilerplate_test'\n BCRYPT_LOG_ROUNDS = 4 # For faster tests\n WTF_CSRF_ENABLED = False # Allows form testing\n" }, { "alpha_fraction": 0.7005128264427185, "alphanum_fraction": 0.7015384435653687, "avg_line_length": 22.80487823486328, "blob_id": "bf5cdd45b4422977b67c428d648b4da62caef2c1", "content_id": "3d2adba4715b604bcf2fa2e8a36a1a7c14bb51f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 975, "license_type": "permissive", "max_line_length": 87, "num_lines": 41, "path": "/zappa_boilerplate/database.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- 
coding: utf-8 -*-\n\"\"\"\nDatabase module, including the SQLAlchemy database object and DB-related utilities.\n\"\"\"\nimport sqlalchemy\nfrom sqlalchemy.orm import relationship\nfrom .extensions import db\nfrom sqlalchemy.orm import scoped_session, create_session\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\n# Alias common SQLAlchemy names\nColumn = db.Column\nrelationship = relationship\n\n\nclass Base(db.Model):\n \"\"\"Base model class\"\"\"\n __abstract__ = True\n\n\nengine = None\ndb_session = scoped_session(lambda: create_session(autocommit=False, autoflush=False,\n expire_on_commit=True, bind=engine))\n\nBase = declarative_base(cls=Base)\nBase.query = db_session.query_property()\n\n\ndef init_engine(uri, **kwargs):\n global engine\n engine = sqlalchemy.create_engine(uri, **kwargs)\n return engine\n\n\ndef init_db():\n Base.metadata.create_all(bind=engine)\n\n\ndef drop_db():\n Base.metadata.drop_all(bind=engine)" }, { "alpha_fraction": 0.7260578870773315, "alphanum_fraction": 0.7268003225326538, "avg_line_length": 26.489795684814453, "blob_id": "197451f1b8f0805a0d0e2805dc836a6638e91744", "content_id": "11bfb6bb3faaa1411e7b00fac5e4048351943184", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "permissive", "max_line_length": 113, "num_lines": 49, "path": "/manage.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom flask_script import Manager, Shell, Server\nfrom flask_migrate import MigrateCommand\n\nfrom zappa_boilerplate.app import create_app\nimport zappa_boilerplate.models as models\nfrom zappa_boilerplate.database import db, db_session\ntry:\n import zappa_boilerplate.settings_local as settings\nexcept ImportError:\n import zappa_boilerplate.settings as settings\n\nenv = os.environ.get('env', 'Local')\n\nconfig_object = getattr(settings, env)\napp 
= create_app(config_object=config_object)\n\nmanager = Manager(app)\n\n\n# The following code removes the database session after every request\[email protected]_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n\n\ndef _make_context():\n \"\"\"Return context dict for a shell session so you can access\n app, db, and the User model by default.\n \"\"\"\n return {'app': app, 'db': db, 'models': models}\n\n\[email protected]\ndef test():\n import subprocess\n command = 'nosetests --cover-erase --with-xunit --with-coverage --cover-package=zappa_boilerplate'.split(' ')\n return subprocess.call(command)\n\nmanager.add_command('server', Server())\nmanager.add_command('shell', Shell(make_context=_make_context))\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n sys.stdout.flush()\n manager.run()\n" }, { "alpha_fraction": 0.6483775973320007, "alphanum_fraction": 0.6548672318458557, "avg_line_length": 33.591835021972656, "blob_id": "787eddc5704da14eaa2af2f3a24912a0c57d2dc2", "content_id": "c680a3f8b02dafbfba963d220e5a2046e834141c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1695, "license_type": "permissive", "max_line_length": 116, "num_lines": 49, "path": "/zappa_boilerplate/user/models.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "import bcrypt\nimport datetime\nimport flask\nfrom flask_login import UserMixin\nimport sqlalchemy\n\nfrom zappa_boilerplate.database import Base\n\n\nclass User(Base, UserMixin):\n\n __tablename__ = 'users'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n username = sqlalchemy.Column(sqlalchemy.String(80), unique=True, nullable=False)\n email = sqlalchemy.Column(sqlalchemy.String(80), unique=True, nullable=False)\n\n password = sqlalchemy.Column(sqlalchemy.String(128), nullable=True) # the hashed password\n created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, 
default=datetime.datetime.utcnow)\n\n def __init__(self, username, email, password):\n self.username = username\n self.email = email\n self.set_password(password)\n\n @staticmethod\n def create(session, username, email, password):\n user = User(username, email, password)\n session.add(user)\n session.flush()\n return user\n\n @classmethod\n def get_by_id(cls, session, user_id):\n if any(\n (isinstance(user_id, str) and user_id.isdigit(),\n isinstance(user_id, (int, float))),\n ):\n return session.query(cls).filter(cls.id == user_id).first()\n return None\n\n def set_password(self, password):\n self.password = bcrypt.hashpw(password.encode('utf-8'),\n bcrypt.gensalt(flask.current_app.config['BCRYPT_LOG_ROUNDS'])).decode('utf-8')\n\n def check_password(self, value):\n return bcrypt.checkpw(value.encode('utf-8'), self.password.encode('utf-8'))\n\n def __repr__(self):\n return '<User({username!r})>'.format(username=self.username)\n" }, { "alpha_fraction": 0.6544082760810852, "alphanum_fraction": 0.6619518995285034, "avg_line_length": 46.13333511352539, "blob_id": "eea9ef50c4c84a3963f0cb76ea5ab7a72b875ca3", "content_id": "770e7c4dbac1fddfb52ae3c95d7111c65e69f849", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "permissive", "max_line_length": 117, "num_lines": 45, "path": "/zappa_boilerplate/user/models_tests.py", "repo_name": "dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom sqlalchemy.exc import IntegrityError\n\nimport zappa_boilerplate.user.models as models\nfrom zappa_boilerplate.test_utils import BaseTestCase\n\n\nclass TestUser(BaseTestCase):\n\n def test_user_create(self):\n\n user1 = models.User.create(self.session, username='foo1', email='[email protected]', password='foobar1')\n user2 = models.User.create(self.session, username='foo2', email='[email protected]', password='foobar2')\n self.session.commit()\n\n users = 
self.session.query(models.User).all()\n self.assertEqual(users, [user1, user2])\n\n # check that an IntegrityError gets raise if we try to create a user with the same username\n self.assertRaises(IntegrityError, models.User.create,\n session=self.session, username='foo1', email='[email protected]', password='foofoo')\n self.session.rollback()\n\n # check that an IntegrityError gets raise if we try to create a user with the same email\n self.assertRaises(IntegrityError, models.User.create,\n session=self.session, username='differentusername', email='[email protected]', password='foofoo')\n\n def test_get_by_id(self):\n user1 = models.User.create(self.session, username='foo1', email='[email protected]', password='foobar1')\n user2 = models.User.create(self.session, username='foo2', email='[email protected]', password='foobar2')\n self.session.commit()\n\n self.assertEqual(models.User.get_by_id(self.session, user1.id), user1)\n self.assertEqual(models.User.get_by_id(self.session, user2.id), user2)\n self.assertIsNone(models.User.get_by_id(self.session, \"not_a_user_id\"))\n\n def test_repr(self):\n user = models.User.create(self.session, username='foo', email='[email protected]', password='foobar')\n self.assertEqual(repr(user), \"<User('foo')>\")\n\n def test_check_password(self):\n pwd = 'foobar'\n user = models.User.create(self.session, username='foo', email='[email protected]', password=pwd)\n self.assertTrue(user.check_password(pwd))\n self.assertFalse(user.check_password('not_the_password'))\n" }, { "alpha_fraction": 0.4868420958518982, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 14.199999809265137, "blob_id": "ab43a770028256cdeea54a336a7a87d246d4e75d", "content_id": "f7932f1d5c4172914c47d03cbac4da5ed6979544", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 76, "license_type": "permissive", "max_line_length": 20, "num_lines": 5, "path": "/requirements_test.txt", "repo_name": 
"dtnewman/zappa_boilerplate", "src_encoding": "UTF-8", "text": "coveralls>=1.1\ncoverage>=4.3.1\nFlask-Testing>=0.7.1\nmock>=2.0.0\nnose>=1.3.7\n" } ]
13
josehermosillaa/zoo-python
https://github.com/josehermosillaa/zoo-python
7840b61ccc9358b8af09a368e0000b6e6fed0d82
19151082994cd12f5be58bd62c9ab8d84a368afa
4b55de02ab6f1fa8e5abc0557490ea2c3b78164c
refs/heads/master
2023-06-10T17:20:26.022045
2021-06-30T22:45:44
2021-06-30T22:45:44
381,519,472
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5425220131874084, "alphanum_fraction": 0.5637829899787903, "avg_line_length": 32.26829147338867, "blob_id": "2375942006a8cc1dda46f039e8cab964d07dab51", "content_id": "2e1e175f1fa592ed95e65b60b26168c5ed7b39b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 162, "num_lines": 41, "path": "/clases/animal.py", "repo_name": "josehermosillaa/zoo-python", "src_encoding": "UTF-8", "text": "import random\ndef randint(min = 0, max = 100):\n if min == 0:\n num = round(max*(random.random()))\n \n return num\n else:\n num = round((max-min)*(random.random())+min)\n \n return num\n# Una vez que haya probado sus diferentes animales y se sienta mรกs cรณmodo con la herencia, cree una clase de zoolรณgico para ayudar a manejar a todos sus animales.\nclass Animal:\n def __init__(self, nombre, edad,nivel_salud=10,nive_felicidad=10):\n self.nombre = nombre\n self.edad = edad\n self.nivel_salud = randint()\n self.nivel_felicidad = randint()\n\n def display_info(self):\n print(f'''\n Nombre: {self.nombre}\n Edad : {self.edad}\n Salud : {self.nivel_salud}%\n Felicidad: {self.nivel_felicidad}%\n ''')\n \n return self\n \n def alimentar(self):\n raise NotImplementedError(\"No esta implementada la funcion\")\n # print(f''' se esta alimentando a {self.name}, รฑam รฑam''')\n # if self.nivel_salud+10 >100:\n # self.nivel_salud = 100\n # else:\n # self.nivel_salud += 10\n # if self.nivel_felicidad+10>100:\n # self.nivel_felicidad = 100\n # else:\n # self.nivel_felicidad += 10\n # print(f'''los nuevos parametros de {self.name} son''')\n # self.display_info()\n" }, { "alpha_fraction": 0.49655961990356445, "alphanum_fraction": 0.5286697149276733, "avg_line_length": 35.20833206176758, "blob_id": "09af0931d73554f5666eba43ef932a917c6f48c4", "content_id": "5a86fdc62d83aa723b75334fa0d263efbdca3ebc", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 874, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/clases/pinguino.py", "repo_name": "josehermosillaa/zoo-python", "src_encoding": "UTF-8", "text": "from .animal import Animal\n\nclass Pinguino(Animal):\n def __init__(self, nombre, edad, nivel_salud=10, nivel_felicidad=10):\n super().__init__(nombre, edad, nivel_salud, nivel_felicidad)\n self.reproduccion = 'Oviparo'\n\n def display_info(self):\n print('-'*15,\"Tipo: \",self.__class__.__name__,'-'*15)\n super().display_info()\n \n def alimentar(self):\n print(f''' se esta alimentando a {self.nombre} con Pez, รฑam รฑam''')\n #\n if self.nivel_salud+15 >100:\n self.nivel_salud = 100\n else:\n self.nivel_salud += 15\n if self.nivel_felicidad+15>100:\n self.nivel_felicidad = 100\n else:\n self.nivel_felicidad += 15\n print(f'''los nuevos parametros de {self.nombre} son''')\n self.display_info() " }, { "alpha_fraction": 0.5005793571472168, "alphanum_fraction": 0.5330243110656738, "avg_line_length": 35, "blob_id": "ca045e69f76b9a7a155f89e9ff195b4138c0c63e", "content_id": "e4cafd32647a58ab0509206a66047f37754c9384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 81, "num_lines": 24, "path": "/clases/leon.py", "repo_name": "josehermosillaa/zoo-python", "src_encoding": "UTF-8", "text": "from .animal import Animal\n\nclass Leon(Animal):\n def __init__(self, nombre, edad, nivel_salud=10, nivel_felicidad=10):\n super().__init__(nombre, edad, nivel_salud, nivel_felicidad)\n self.reproduccion = 'viviparo'\n \n def display_info(self):\n print('-'*15,\"Tipo: \",self.__class__.__name__,'-'*15)\n super().display_info()\n\n def alimentar(self):\n print(f''' se esta alimentando a {self.nombre} con Carne, รฑam รฑam''')\n #\n if self.nivel_salud+30 >100:\n self.nivel_salud = 100\n else:\n self.nivel_salud += 30\n if self.nivel_felicidad+30>100:\n 
self.nivel_felicidad = 100\n else:\n self.nivel_felicidad += 30\n print(f'''los nuevos parametros de {self.nombre} son''')\n self.display_info()" }, { "alpha_fraction": 0.5083829164505005, "alphanum_fraction": 0.5197404026985168, "avg_line_length": 34.20000076293945, "blob_id": "1dce5840927e0a1758cb071d26e7e0ed0fea622a", "content_id": "703e2ced75011c2facfb147656f7f473ce044183", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3698, "license_type": "no_license", "max_line_length": 146, "num_lines": 105, "path": "/zooInteractivo.py", "repo_name": "josehermosillaa/zoo-python", "src_encoding": "UTF-8", "text": "import time\nfrom clases.animal import Animal\nfrom clases.elefante import Elefante\nfrom clases.leon import Leon\nfrom clases.pinguino import Pinguino\nfrom clases.oso import Oso\nclass Zoo:\n def __init__(self, zoo_name):\n self.animales = []\n self.name = zoo_name\n def add_leon(self, nombre,edad):\n self.animales.append( Leon(nombre,edad) )\n \n \n def add_pinguino(self, nombre,edad):\n self.animales.append(Pinguino(nombre,edad) )\n \n def add_elefante(self,nombre,edad): \n self.animales.append(Elefante(nombre,edad) )\n \n def add_oso(self, nombre, edad):\n self.animales.append(Oso(nombre,edad) )\n\n def print_all_info(self):\n print(\"-\"*30, self.name, \"-\"*30)\n for animal in self.animales:\n animal.display_info()\n\n def lista_animales(self):\n # print(f\"la lista es:{self.animales}\")#siempre se debe hacer con un for\n for i in range(len(self.animales)):\n print(self.animales[i].nombre)\n print(self.animales[i].edad)\n # print(self.animales[i].nivel_felicidad)\n \n def alimentar_a_todos(self):\n print(\"-\"*30, self.name, \"-\"*30)\n for animal in self.animales:\n animal.alimentar()\n return self\n \nzoo = []\nwhile True:\n\n print(\"Bienvenido al sistema de ZOOlogicos\")\n menu = input(\"1.Agregar Zoologico \\n2.Ingresar a Zoologico \\n3.Quitar Zoologicos \\n4.Salir \\n Ingrese una opcion: \")\n 
if menu == \"1\":\n nombre_zoologico = input(\"Ingrese el nombre del Zoologico:\")\n # zoo.append(nombre_zoologico)\n zoologico= Zoo(f\"{nombre_zoologico}\")\n zoo.append(zoologico)\n print(f\"se ha agregado el zoologico {zoologico.name}\")\n time.sleep(2)\n continue\n elif menu == \"2\":\n print(\"los zoologicos agregados son:\")\n for zoologico in zoo:\n print(zoo.index(zoologico),\"-\",\">\",zoologico.name)\n while True:\n i = int(input(\"ingrese la opcion de su zoologico:\"))\n print(\"zoologico seleccionado\",zoo[i].name)\n time.sleep(2)\n seleccion=int(input(\"1. Agregar Animal \\n2. Mostrar Animales \\n3. Alimentar Animales \\n4. Salir del Zoologico \\nIngrese una opcion:\"))\n if seleccion == 1:\n while True :\n numero = int(input(\"Que animal desea aregar? : \\n1. Leon \\n2. Pinguino \\n3. Elefante \\n4. Oso \\n5. Volver atras \"))\n if numero == 1:\n pass\n elif numero == 2:\n pass\n elif numero == 3:\n pass\n elif numero == 4:\n pass\n elif numero == 5:\n print(\"los zoologicos agregados son:\")\n for zoologico in zoo:\n print(zoo.index(zoologico),\"-\",\">\",zoologico.name)\n break\n else:\n print(\"La opcion ingresada no es valida\")\n continue\n \n elif seleccion == 2:\n pass\n elif seleccion == 3:\n pass\n elif seleccion == 4:\n break\n else:\n print(\"tecla no reconocida\")\n time.sleep(0.5)\n print(\"intente nuevamente\")\n time.sleep(0.5)\n continue\n elif menu == \"3\":\n continue\n elif menu == \"4\":\n print(\"Hasta pronto\")\n time.sleep(1)\n break\n else:\n print(\"tecla no reconocida\")\n time.sleep(1) #en segundos\n pass\n\n\n" }, { "alpha_fraction": 0.5962417721748352, "alphanum_fraction": 0.612493634223938, "avg_line_length": 30.74193572998047, "blob_id": "53b7f61b39f4abb336df1433130b34db09de748c", "content_id": "203d7bd21ea75f962c298f25b02f96e118ec1ddb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1969, "license_type": "no_license", "max_line_length": 80, "num_lines": 62, 
"path": "/zoo.py", "repo_name": "josehermosillaa/zoo-python", "src_encoding": "UTF-8", "text": "\nfrom clases.animal import Animal\nfrom clases.elefante import Elefante\nfrom clases.leon import Leon\nfrom clases.pinguino import Pinguino\nfrom clases.oso import Oso\nclass Zoo:\n def __init__(self, zoo_name):\n self.animales = []\n self.name = zoo_name\n\n def add_leon(self, nombre,edad):\n self.animales.append( Leon(nombre,edad) )\n return self\n \n \n def add_pinguino(self, nombre,edad):\n self.animales.append(Pinguino(nombre,edad) )\n return self\n def add_elefante(self,nombre,edad): \n self.animales.append(Elefante(nombre,edad) )\n return self\n def add_oso(self, nombre, edad):\n self.animales.append(Oso(nombre,edad) )\n return self\n def print_all_info(self):\n print(\"-\"*30, self.name, \"-\"*30)\n for animal in self.animales:\n animal.display_info()\n return self\n def lista_animales(self):\n # print(f\"la lista es:{self.animales}\")#siempre se debe hacer con un for\n for i in range(len(self.animales)):\n print(self.animales[i].nombre)\n print(self.animales[i].edad)\n # print(self.animales[i].nivel_felicidad)\n return self\n def alimentar_a_todos(self):\n print(\"-\"*30, self.name, \"-\"*30)\n for animal in self.animales:\n animal.alimentar()\n return self\n # def __str__(self) -> str:\n # str_con_el_resultado = 'Objeto de animales: '\n # for animal in self.animales:\n # str_con_el_resultado += \"\\n * {}\".format(animal)\n # print(str_con_el_resultado)\n # return str_con_el_resultado\n \n\nzoo1 = Zoo(\"John's Zoo\")\nzoo2 = Zoo(\"Metro\")\nzoo1.add_leon(\"Nala\",5)\nzoo1.add_leon(\"Simba\",6)\nzoo1.add_pinguino(\"Rey Julien\",4)\nzoo1.add_pinguino(\"Rico\",4)\nzoo1.add_oso(\"Baloo\",10)\nzoo1.add_elefante(\"Dumbo\",10)\nzoo1.print_all_info()\nzoo1.alimentar_a_todos()\nzoo2.add_elefante(\"dumbo 2\",15)\nzoo2.alimentar_a_todos()\n# zoo1.lista_animales()\n" } ]
5
rudyshine/2018weather
https://github.com/rudyshine/2018weather
177a0623679a94c2771230252b0f80f1ddd67c70
97a8c3073a90740c411921cbb16ac810e3ffa804
4f1e1055b07e7bfae4239b80662b10964c1ea08a
refs/heads/master
2021-05-04T16:20:10.380548
2018-03-19T07:20:29
2018-03-19T07:20:29
120,249,822
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5707002878189087, "alphanum_fraction": 0.5873356461524963, "avg_line_length": 35.54901885986328, "blob_id": "ea7e2a38e30e639d454325934e32177db8e24f4b", "content_id": "d441d99755d0e29850ae0ba64c1df8773b6ac689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4097, "license_type": "no_license", "max_line_length": 305, "num_lines": 102, "path": "/Weather/7day.py", "repo_name": "rudyshine/2018weather", "src_encoding": "UTF-8", "text": "# coding : UTF-8\n\nimport requests\nimport csv\nimport random\nimport time\nimport socket\nimport http.client\nimport codecs\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\ndef get_content(url , data = None):\n header={'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8','Accept-Encoding':'gzip, deflate, sdch','Accept-Language':'zh-CN,zh;q=0.8','Connection':'keep-alive','User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'}\n timeout = random.choice(range(80,180))\n\n try:\n rep = requests.get(url,headers = header,timeout = timeout)\n rep.encoding ='utf-8'\n time.sleep(random.choice(range(8,15)))\n except socket.error as e:\n print('4:', e)\n time.sleep(random.choice(range(20,60)))\n except http.client.BadStatusLine as e:\n print('5:', e)\n time.sleep(random.choice(range(30,80)))\n except http.client.IncompleteRead as e:\n print('6:', e)\n time.sleep(random.choice(range(5,15)))\n return rep.text\n\n\ndef get_data(html_text,id,name):\n final = []\n bs = BeautifulSoup(html_text,\"html.parser\")# ๅˆ›ๅปบBeautifulSoupๅฏน่ฑก\n body = bs.body# ่Žทๅ–body้ƒจๅˆ†\n data = body.find('div', {'id':'7d'})# ๆ‰พๅˆฐidไธบ7d็š„div\n ul = data.find('ul')# ่Žทๅ–ul้ƒจๅˆ†\n li = ul.find_all('li')# ่Žทๅ–ๆ‰€ๆœ‰็š„li\n for day in li:# ๅฏนๆฏไธชliๆ ‡็ญพไธญ็š„ๅ†…ๅฎน่ฟ›่กŒ้ๅކ\n temp = []\n idNumber = id\n temp.append(idNumber)\n idName=name\n temp.append(idName)\n date = 
day.find('h1').string# ๆ‰พๅˆฐๆ—ฅๆœŸ\n temp.append(date)# ๆทปๅŠ ๅˆฐtempไธญ\n inf = day.find_all('p')# ๆ‰พๅˆฐliไธญ็š„ๆ‰€ๆœ‰pๆ ‡็ญพ\n temp.append(inf[0].string,)# ็ฌฌไธ€ไธชpๆ ‡็ญพไธญ็š„ๅ†…ๅฎน๏ผˆๅคฉๆฐ”็Šถๅ†ต๏ผ‰ๅŠ ๅˆฐtempไธญ\n temperature_highest = day.find('span').string# ๆ‰พๅˆฐๆœ€้ซ˜ๆธฉ\n temperature_highest = temperature_highest.replace('โ„ƒ','')# ๅˆฐไบ†ๆ™šไธŠ็ฝ‘็ซ™ไผšๅ˜๏ผŒๆœ€้ซ˜ๆธฉๅบฆๅŽ้ขไนŸๆœ‰ไธชโ„ƒ\n temp.append(temperature_highest) # ๅฐ†ๆœ€้ซ˜ๆธฉๆทปๅŠ ๅˆฐtempไธญ\n temperature_lowest = day.find('i').string# ๆ‰พๅˆฐๆœ€ไฝŽๆธฉ\n temperature_lowest = temperature_lowest.replace('โ„ƒ','')# ๆœ€ไฝŽๆธฉๅบฆๅŽ้ขๆœ‰ไธชโ„ƒ๏ผŒๅŽปๆމ่ฟ™ไธช็ฌฆๅท\n temp.append(temperature_lowest)#ๅฐ†ๆœ€ไฝŽๆธฉๆทปๅŠ ๅˆฐtempไธญ\n final.append(temp)#ๅฐ†tempๅŠ ๅˆฐfinalไธญ\n return final\n\n\n\ndef write_data(data, name):\n file_name = name\n with open(file_name,'a', errors='ignore', newline='')as f:\n f_csv = csv.writer(f)\n f_csv.writerows(data)\n\nif __name__ == '__main__':\n\n inforead = codecs.open(\"list_CityId.txt\", 'r', 'utf-8') ##ๆ‰“ๅผ€ๅŸŽๅธ‚IDๅˆ—่กจๆ–‡ไปถ\n idNumber = inforead.readline().rstrip('\\r\\n') ##่ฏปๅŸŽๅธ‚ๅˆ—่กจ\n nameforead = codecs.open(\"list_CityName.txt\", 'r', 'utf-8') ##ๆ‰“ๅผ€ๅŸŽๅธ‚ๅ็งฐๅˆ—่กจๆ–‡ไปถ\n idName = nameforead.readline().rstrip('\\r\\n') ##่ฏปๅŸŽๅธ‚ๅ็งฐ\n while idNumber != \"\":\n idNumber = idNumber.rstrip('\\r\\n')\n idName = idName.rstrip('\\r\\n')\n url ='http://www.weather.com.cn/weather/'+idNumber+'.shtml'\n print(url)\n try:\n html = get_content(url)\n result = get_data(html,idNumber,idName)\n print(result)\n write_data(result,'weather.csv')\n except:\n print('IP่ขซๅฐ1๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + idNumber)\n time.sleep(600)\n try:\n html = get_content(url)\n result = get_data(html, idNumber, idName)\n print(result)\n write_data(result, 'weather.csv')\n except:\n print('IP่ขซๅฐ2๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + idNumber)\n time.sleep(600)\n try:\n html = get_content(url)\n result = get_data(html, idNumber, idName)\n print(result)\n 
write_data(result, 'weather.csv')\n except:\n pass\n idNumber = inforead.readline().rstrip('\\r\\n')\n idName = nameforead.readline().rstrip('\\r\\n')" }, { "alpha_fraction": 0.5625370740890503, "alphanum_fraction": 0.5720213651657104, "avg_line_length": 32.05882263183594, "blob_id": "c1393bfe232cc0434e93ca6acd3f11e0c28fd1c8", "content_id": "afc51c6e0e32a3619310560184bcac3bc52f7191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1795, "license_type": "no_license", "max_line_length": 132, "num_lines": 51, "path": "/Weather/weather_spider.py", "repo_name": "rudyshine/2018weather", "src_encoding": "UTF-8", "text": "import requests\nimport csv\nimport time\nimport codecs\nimport pymongo\n\ndef get_infor(idNumber,ProgramTime):\n url='http://www.weather.com.cn/data/sk/'+str(idNumber)+'.html'\n r = requests.get(url)\n r.encoding = 'utf-8'\n city=r.json()['weatherinfo']['city']\n cityid=r.json()['weatherinfo']['cityid']\n temp=r.json()['weatherinfo']['temp']\n WD=r.json()['weatherinfo']['WD']\n WS=r.json()['weatherinfo']['WS']\n SD=r.json()['weatherinfo']['SD']\n time=r.json()['weatherinfo']['time']\n print(city, cityid, temp, WD, WS, SD, time,ProgramTime)\n info_weather.insert({'ๅŸŽๅธ‚': city, \"ๅŸŽๅธ‚ID\": cityid, 'ๆธฉๅบฆ': temp, '้ฃŽๅ‘': WD, '้ฃŽๅŠ›': WS, 'ๆนฟๅบฆ': SD, 'ๅ‘ๅธƒๆ—ถ้—ด': time,'็จ‹ๅบ่ฟ่กŒๆ—ถ้—ด': ProgramTime })\n\n\n\nif __name__ == '__main__':\n\n client = pymongo.MongoClient('localhost', 27017)\n WeatherData = client['WeatherData']\n info_weather = WeatherData['info_weather']\n # set = WeatherData.info_weather\n\n ProgramTime = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n #ๆ–‡ไปถๆ“ไฝœ่ฏปๅ†™ไฟกๆฏ\n print('Read idNumber:')\n inforead = codecs.open(\"urllist_ID.txt\", 'r', 'utf-8')\n idNumber = inforead.readline()\n while idNumber != \"\":\n idNumber = idNumber.rstrip('\\r\\n')\n try:\n get_infor(idNumber,ProgramTime)\n except:\n print('IP่ขซๅฐ1๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + 
idNumber)\n time.sleep(600)\n try:\n get_infor(idNumber, ProgramTime)\n except:\n print('IP่ขซๅฐ2๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + idNumber)\n time.sleep(3600)\n try:\n get_infor(idNumber, ProgramTime)\n except:\n print(\"IP่ขซๅฐ\"+ idNumber)\n idNumber = inforead.readline()\n\n" }, { "alpha_fraction": 0.5138835906982422, "alphanum_fraction": 0.5591479539871216, "avg_line_length": 33.155845642089844, "blob_id": "9696de4167c7a885f6568b62033439e462859824", "content_id": "4a1de0bdf8b6db6d47c316656b33d81c8546f3c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2971, "license_type": "no_license", "max_line_length": 134, "num_lines": 77, "path": "/Weather/HisoryWeatherData.py", "repo_name": "rudyshine/2018weather", "src_encoding": "UTF-8", "text": "# encoding=utf-8\nimport codecs\nimport requests\nimport json\nimport pymongo\nimport time\n\ndef request(year, month,idNumber):\n url = \"http://d1.weather.com.cn/calendar_new/\"+ year+ \"/\" +str(idNumber)+\"_\"+year+month+\".html?_=1495685758174\"\n print(url)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\",\n \"Referer\": \"http://www.weather.com.cn/weather40d/101280701.shtml\",\n }\n return requests.get(url,headers=headers)\n\ndef parse(res):\n time.sleep(10)\n json_str = res.content.decode(encoding='utf-8')[11:] #่Žทๅ–ๆ•ฐๆฎ\n return json.loads(json_str) ##่งฃๆžjsonๆ•ฐๆฎ\n\ndef save(list):\n subkey = {'date': 'ๆ—ฅๆœŸ','hmax': 'ๆœ€้ซ˜ๆธฉๅบฆ', 'hmin': 'ๆœ€ไฝŽๆธฉๅบฆ', 'hgl': 'ๆนฟๅบฆ', 'wk': 'ๆ˜ŸๆœŸ', 'time': 'ๅ‘ๅธƒๆ—ถ้—ด'} ##, 'fe': '่Š‚ๆ—ฅ'\n CityInfo={'ๅŸŽๅธ‚ID': idNumber,'ๅŸŽๅธ‚ๅ็งฐ': idName}\n\n for dict in list:\n subdict = {value: dict[key] for key, value in subkey.items()} #ๆๅ–ๅŽŸๅญ—ๅ…ธไธญ้ƒจๅˆ†้”ฎๅ€ผๅฏน๏ผŒๅนถๆ›ฟๆขkeyไธบไธญๆ–‡\n subdict.update(CityInfo) #ๅŠ ๅ…ฅๅŸŽๅธ‚ID\n forecast.insert_one(subdict) #ๆ’ๅ…ฅmongodbๆ•ฐๆฎๅบ“\n\ndef getInfo():\n year = 
\"2018\"\n # month = \"01\" ##ๅขž้‡ๆ•ฐๆฎๆ”ถ้›†๏ผŒๆ‰‹ๅŠจไฟฎๆ”นๆœˆไปฝ\n\n # #ๅ…จ้‡ๆ”ถ้›†๏ผŒ็›ดๆŽฅๆ‰“ๅผ€ๆœˆไปฝๆณจ้‡Šๅณๅฏ\n month=1\n for i in range(month, 13):\n month = str(i) if i > 9 else \"0\" + str(i) # ๅฐไบŽ10็š„ๆœˆไปฝ่ฆ่กฅ0\n save(parse(request(year, month,idNumber)))\n\n\n\n\nif __name__ == '__main__':\n\n client = pymongo.MongoClient('172.28.171.13', 27017) # ่ฟžๆŽฅmongodb,็ซฏๅฃ27017 ๆญฃๅผๆ•ฐๆฎๅบ“\n # client = pymongo.MongoClient('localhost', 27017) # ่ฟžๆŽฅmongodb,็ซฏๅฃ27017\n test = client['WeatherData'] # ๅˆ›ๅปบๆ•ฐๆฎๅบ“ๆ–‡ไปถtest\n forecast = test['HistoryData2018'] # ๅˆ›ๅปบ่กจforecast\n inforead = codecs.open(\"list_CityId.txt\", 'r', 'utf-8') ##ๆ‰“ๅผ€ๅŸŽๅธ‚IDๅˆ—่กจๆ–‡ไปถ\n idNumber = inforead.readline().rstrip('\\r\\n') ##่ฏปๅŸŽๅธ‚ๅˆ—่กจ\n nameforead = codecs.open(\"list_CityName.txt\", 'r', 'utf-8') ##ๆ‰“ๅผ€ๅŸŽๅธ‚ๅ็งฐๅˆ—่กจๆ–‡ไปถ\n idName = nameforead.readline().rstrip('\\r\\n') ##่ฏปๅŸŽๅธ‚ๅ็งฐ\n\n while idNumber != \"\":\n idNumber = idNumber.rstrip('\\r\\n')\n idName=idName.rstrip('\\r\\n')\n try:\n getInfo()\n time.sleep(3)\n except :\n print('IP่ขซๅฐ1๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + idNumber)\n time.sleep(600)\n try:\n getInfo()\n time.sleep(3)\n except :\n print('IP่ขซๅฐ2๏ผŒ็จ‹ๅบไผ‘ๆฏๅฝ“ๅ‰IDไธบ๏ผš' + idNumber)\n time.sleep(3600)\n try:\n getInfo()\n time.sleep(3)\n except :\n pass\n print(\"IP่ขซๅฐ\"+ idNumber)\n idNumber=inforead.readline()\n idName=nameforead.readline()" }, { "alpha_fraction": 0.5930047631263733, "alphanum_fraction": 0.6279809474945068, "avg_line_length": 25.25, "blob_id": "766bfce409a605f572ba8590de242afca03da321", "content_id": "73465c2dd010add5c2300505f0d59f861cc52702", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 136, "num_lines": 24, "path": "/Weather/urllist.py", "repo_name": "rudyshine/2018weather", "src_encoding": "UTF-8", "text": "# encoding=utf-8\nimport codecs\nimport requests\nimport 
json\nimport pymongo\nimport time\nimport pandas as pd\n\n\ninforead = codecs.open(\"urllist_ID.txt\", 'r', 'utf-8')\nidNumber = inforead.readline()\nwhile idNumber != \"\":\n idNumber = idNumber.rstrip('\\r\\n')\n year = \"2016\"\n month = 1\n url = \"http://d1.weather.com.cn/calendar_new/\" + year + \"/\" + str(idNumber) + \"_\" + str(year) + str(month) + \".html?_=1513081038706\"\n print(url)\n file = codecs.open(\"urllist.txt\", \"w\")\n file.write(url)\n file.close()\n idNumber = inforead.readline()\n # request(year, month,idNumber)\n # time.sleep(3)\nprint('zz')" }, { "alpha_fraction": 0.6682927012443542, "alphanum_fraction": 0.6975609660148621, "avg_line_length": 26.266666412353516, "blob_id": "daf15dc3869ffd80cd4581c74c6b410a6d9fbee2", "content_id": "5e978cf8294c94533e3e2451da1610487e7d43e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/Weather/test7.py", "repo_name": "rudyshine/2018weather", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\n\nurl='http://www.weather.com.cn/weather/101190401.shtml'\n\nhtml = requests.get(url).content\nsoup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')\nresult = soup.find_all(\"ul\",class_=\"t clearfix\")\nprint(result)\n\nfor day in result: # ๅฏนๆฏไธชliๆ ‡็ญพไธญ็š„ๅ†…ๅฎน่ฟ›่กŒ้ๅކ\n temp = []\n date = day.find('h1').string # ๆ‰พๅˆฐๆ—ฅๆœŸ\n temp.append(date) # ๆทปๅŠ ๅˆฐtempไธญ\n print(date)\n\n" } ]
5
dnrsm/Introduction-to-Computation-and-Programming-Using-Python
https://github.com/dnrsm/Introduction-to-Computation-and-Programming-Using-Python
8f7a7fa5d33165de69bef8c2013791561a956ff5
e678fb016c76352e5f4e78b45d4b24165ce39ff0
64c3b10f0333661d8cc711f927a11de628414e74
refs/heads/master
2020-03-17T03:15:19.744191
2018-05-13T13:53:59
2018-05-13T13:53:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.484375, "alphanum_fraction": 0.5174851417541504, "avg_line_length": 27.30526351928711, "blob_id": "012bd5178ea18448fd3f3df0e3d15695b60328ab", "content_id": "b291e0ba9040ff9c0db79b09bda3073f54dac10e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2840, "license_type": "no_license", "max_line_length": 74, "num_lines": 95, "path": "/chapter_16/16.py", "repo_name": "dnrsm/Introduction-to-Computation-and-Programming-Using-Python", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\n\n\n# 16.1\ndef rollDie():\n return random.choice([1,2,3,4,5,6])\n\ndef chackPascal(numTrials):\n \"\"\"ๅ‹ๅˆฉใ™ใ‚‹็ขบ็އใฎ่ฉ•ไพกๅ€คใ‚’่กจ็คบใ™ใ‚‹\"\"\"\n numWins = 0\n for i in range(numTrials):\n for j in range(24):\n d1 = rollDie()\n d2 = rollDie()\n if d1 == 6 and d2 == 6:\n numWins += 1\n break\n print('Probability of winning = ', numWins/numTrials)\n # print(numWins)\n # print(numTrials)\n\nchackPascal(10000)\n\n\n# 16.2\nclass CrapsGame(object):\n def __init__(self):\n self.pass_wins, self.pass_losses = 0, 0\n self.dp_wins, self.dp_losses, self.dp_pushes = 0, 0, 0\n\n def play_hand(self):\n throw = rollDie() + rollDie()\n if throw == 7 or throw == 11:\n self.pass_wins += 1\n self.dp_losses += 1\n elif throw == 2 or throw == 3 or throw == 12:\n self.pass_losses += 1\n if throw == 12:\n self.dp_pushes += 1\n else:\n self.dp_wins += 1\n else:\n point = throw\n while True:\n throw = rollDie() + rollDie()\n if throw == point:\n self.pass_wins += 1\n self.dp_losses += 1\n break\n elif throw == 7:\n self.pass_losses += 1\n self.dp_wins += 1\n break\n\n def pass_results(self):\n return (self.pass_wins, self.pass_losses)\n\n def dp_results(self):\n return (self.dp_wins, self.dp_losses, self.dp_pushes)\n\n\n\ndef craps_sim(hands_per_game, num_games):\n \"\"\"hands_per_gameใฎๆ‰‹ใ‹ใ‚‰ๆˆใ‚‹ใ‚ฒใƒผใƒ ใ‚’numGamesๅ›žใƒ—ใƒฌใ‚คใ—ใ€\n ใใฎ็ตๆžœใ‚’่กจ็คบใ™ใ‚‹\"\"\"\n games = []\n\n # ใ‚ฒใƒผใƒ 
ใ‚’numGamesๅ›žใƒ—ใƒฌใ‚คใ™ใ‚‹\n for t in range(num_games):\n c = CrapsGame()\n for i in range(hands_per_game):\n c.play_hand()\n games.append(c)\n\n # ๅ„ใ‚ฒใƒผใƒ ใฎ็ตฑ่จˆๅ€คใ‚’ๆฑ‚ใ‚ใ‚‹\n pROI_per_game, dpROI_per_game = [], []\n for g in games:\n wins, losses = g.pass_results()\n pROI_per_game.append((wins - losses) / float(hands_per_game))\n wins, losses, pushes = g.dp_results()\n dpROI_per_game.append((wins - losses) / float(hands_per_game))\n\n # ็ตฑ่จˆๅ€คใฎๆฆ‚่ฆใ‚’ๆฑ‚ใ‚ใฆ่กจ็คบใ™ใ‚‹\n meanROI = str(round((100 * sum(pROI_per_game) / num_games), 4)) + '%'\n sigma = str(round(100 * np.std(pROI_per_game), 4)) + '%'\n print('Pass:', 'Mean ROI =', meanROI, 'Std. Dev. =', sigma)\n\n meanROI = str(round((100 * sum(dpROI_per_game) / num_games), 4)) + '%'\n sigma = str(round(100 * np.std(dpROI_per_game), 4)) + '%'\n print('Don\\'t Pass:', 'Mean ROI =', meanROI, 'Std. Dev. =', sigma)\n\ncraps_sim(20, 10)\ncraps_sim(1000000, 10)\ncraps_sim(20, 1000000)" } ]
1
stefanzier/constantcontact-export
https://github.com/stefanzier/constantcontact-export
8a1b88878e1bf5c150df5459939a1d48e3f92a45
fcebbcb9f738d785fecd2e23f229f2a52179bc76
d80af737b918874f8f3b3e1d65603084dca89fd5
refs/heads/master
2021-03-27T15:49:27.730380
2018-02-02T20:37:23
2018-02-02T20:37:23
117,887,034
0
0
null
2018-01-17T20:09:28
2018-02-02T20:39:19
2021-03-19T22:08:20
Python
[ { "alpha_fraction": 0.7102040648460388, "alphanum_fraction": 0.7163265347480774, "avg_line_length": 24.789474487304688, "blob_id": "3b359a74ac6f6783a12da83bf6ef9b0c3a0ce85f", "content_id": "fd756d7ff551bec353ab6e6db600ac033b082fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 58, "num_lines": 19, "path": "/app/routes.py", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "from flask import render_template\nfrom app import app\nfrom app.forms import RetrieveCSVForm\nfrom app.models.export import download_csv\n\n\[email protected]('/')\[email protected]('/index', methods=[\"GET\", \"POST\"])\ndef index():\n form = RetrieveCSVForm()\n if form.validate_on_submit():\n return download_csv.delay(form.eventId.data)\n\n return render_template('index.html', form=form)\n\n\[email protected](500)\ndef pageNotFound(error):\n return \"Uh oh! Please go back and check your Event ID\"\n" }, { "alpha_fraction": 0.6255379915237427, "alphanum_fraction": 0.6286465525627136, "avg_line_length": 32.19047546386719, "blob_id": "4aa28caa14b647853cdb606713e413599be465b6", "content_id": "b198cdc2824b6f6768fa77ee4a2a6a7c8b98cdeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4185, "license_type": "no_license", "max_line_length": 80, "num_lines": 126, "path": "/app/models/export.py", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "import sys\nimport csv\nimport os\nimport requests\nfrom flask import make_response\nfrom app.lib.constantcontact import ConstantContact\nfrom celery import task\n\n\ndef progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n prcnt = round(100.0 * count / float(total), 1)\n bar = 'โ–ˆ' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s %s\\r' % 
(bar, prcnt, '%', status))\n sys.stdout.flush()\n\n\ndef WriteDictToCSV(csv_file, csv_columns, dict_data):\n with open(csv_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in dict_data:\n writer.writerow(data)\n return\n\n\ndef WriteCSVFile(eventId):\n # ConstantContact setupรงj\n API_KEY = \"YOUR_API_KEY_HERE\"\n API_ACCESS_CODE = \"YOUR_API_ACCESS_CODE_HERE\"\n cc = ConstantContact(API_KEY, API_ACCESS_CODE)\n\n # Make API request and retrieve all registrants\n event_reg_params = {\"eventId\": eventId, \"limit\": \"limit\"}\n request = cc.eventspot.events.eventId.registrants(\n variable=event_reg_params)\n\n # List of registrants\n registrants = request[\"results\"]\n\n if len(registrants) == 0:\n print(\"Please check your eventId. Exiting now...\")\n sys.exit(0)\n\n # Dictionary to keep track of our registrants\n users = {}\n\n # Loop over registrants and extract first_name, last_name, email\n # and store as a user in the users dictionary\n for user in registrants:\n uid = user[\"id\"]\n users[uid] = {\"first_name\": user[\"first_name\"],\n \"last_name\": user[\"last_name\"]}\n\n # We are using the user variable so we can remove registrants\n del registrants\n\n # Keep track of our total users and the current index for our progress bar\n total_users = len(users)\n current_user_index = 1\n\n # Now loop over our users and perform an API request to retrieve a user's\n # company name since we need company_name in the CSV export\n for uid in users:\n # Output progress bar\n progress_status = \"User: \"+str(current_user_index)+\"/\"+str(total_users)\n progress(current_user_index, total_users, status=progress_status)\n\n # Make API request for user's company name\n try:\n reg_user_params = {\"eventId\": eventId, \"registrantId\": uid}\n reg_user_req = cc.eventspot.events.eventId.registrants.registrantId(\n variable=reg_user_params\n )\n\n # Store company name in respective user\n company_name = 
reg_user_req[\"sections\"][1][\"fields\"][2][\"value\"]\n users[uid][\"company\"] = company_name\n\n # Store the registration date in respective user\n registration_date = reg_user_req[\"registration_date\"]\n users[uid][\"registration_date\"] = registration_date\n\n # increment our current index for our progress bar\n current_user_index += 1\n except requests.exceptions.HTTPError as err:\n print(\"Could not process User ID:\", uid)\n print(err)\n print(\"Skipping\", uid)\n print(\"-----------------------------\")\n continue\n\n # Our WriteDictToCSV function requires a list of dictionary values\n dict_data = []\n for uid in users:\n dict_data.append(users[uid])\n\n # Don't need our users dictionary anymore so remove it\n del users\n\n # Perform CSV export\n csv_columns = [\"first_name\", \"last_name\", \"company\", \"registration_date\"]\n currentPath = os.getcwd()\n csv_file = currentPath + \"/app/csv/registrants.csv\"\n\n WriteDictToCSV(csv_file, csv_columns, dict_data)\n\n\n@task\ndef download_csv(eventId):\n WriteCSVFile(eventId=eventId)\n currentPath = os.getcwd()\n csv_string = \"\"\n with open(currentPath + \"/app/csv/registrants.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n csv_string += \", \".join(row) + \"\\n\"\n response = make_response(csv_string)\n cd = 'attachment; filename=registrants.csv'\n response.headers['Content-Disposition'] = cd\n response.mimetype = 'text/csv'\n\n return response\n" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 33.125, "blob_id": "a8421c702e8bb1aa4765d34499039f0116499871", "content_id": "d44bed44ad7bc46d481271dfe3b1b98b64ee40b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/app/forms.py", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "from flask_wtf import 
FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\n\nclass RetrieveCSVForm(FlaskForm):\n eventId = StringField('Event ID', validators=[DataRequired()])\n submit = SubmitField('Retrieve Registrants')\n" }, { "alpha_fraction": 0.5209821462631226, "alphanum_fraction": 0.5258928537368774, "avg_line_length": 25.046510696411133, "blob_id": "16b2e6cb730d8f29032be77ac9f3b9d506583d66", "content_id": "51982d970843e5e9c652e0e43c27c59504e3cee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2240, "license_type": "no_license", "max_line_length": 78, "num_lines": 86, "path": "/app/lib/constantcontact.py", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "'''\nJordan Clark\n2015 - Blitzen.com\n'''\n\nimport requests\nimport re\nimport json\n\nBASE_URL = 'https://api.constantcontact.com/v2/'\n\n\nclass ConstantContactError(Exception):\n\n def __init__(self, response):\n self.response = response\n\n def __str__(self):\n return self.response.get('error', 'No error provided')\n\n\nclass IncorrectApiKey(ConstantContactError):\n pass\n\n\nclass ConstantContact(object):\n\n def __init__(self, api_key, access_token):\n self.api_key = api_key\n self.access_token = access_token\n self._attr_path = []\n self._request_method = {\n 'POST': requests.post,\n 'GET': requests.get,\n 'PUT': requests.put,\n 'DELETE': requests.delete,\n }\n\n def __call__(self, *args, **kwargs):\n url = BASE_URL + '/'.join(self._attr_path)\n added_limit = False\n for variable_name, variable_sub in kwargs.get('variable', {}).items():\n url = re.sub(variable_name, variable_sub, url)\n print(\"URL: \" + url)\n if \"limit\" in kwargs.get('variable') and not added_limit:\n url += \"?limit=500\"\n added_limit = True\n\n self._attr_path = []\n return self._request(\n url,\n kwargs.get('data', {}),\n kwargs.get('method', 'GET'),\n kwargs.get('params', {})\n )\n\n def 
__getattr__(self, attr, *args, **kwargs):\n self._attr_path.append(attr)\n return self\n\n def _request(self, endpoint, data, method='GET', params={}):\n headers = {\n 'Authorization': 'Bearer {}'.format(self.access_token),\n 'Content-Type': 'application/json'\n }\n params['api_key'] = self.api_key\n\n if(type(data) == dict):\n data = json.dumps(data)\n\n try:\n request = self._request_method[method]\n except KeyError:\n raise ConstantContactError('Unknown verb')\n\n response = request(\n endpoint,\n data=data,\n params=params,\n headers=headers\n )\n\n if response.status_code < 400:\n return response.json()\n else:\n response.raise_for_status()\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 24, "blob_id": "9c280f21139d4ef9bf977c76254e2d858892ce39", "content_id": "0954918673eccdfbb725b065a32a80884551df7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/README.md", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "# constantcontact-export\n" }, { "alpha_fraction": 0.6394230723381042, "alphanum_fraction": 0.6875, "avg_line_length": 28.714284896850586, "blob_id": "69a4ce4ce3b763c9078f0e703217e49e24b5954a", "content_id": "c807affebb5696457ecfeaabe52b0e4601ada483", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 67, "num_lines": 7, "path": "/config.py", "repo_name": "stefanzier/constantcontact-export", "src_encoding": "UTF-8", "text": "import os\n\n\nclass Config(object):\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'super-secret-key'\n CELERY_BROKER_URL = \"redis://localhost:6379/0\"\n CELERY_RESULT_BACKEND = \"redis://localhost:6379/0\"\n" } ]
6
utkarshpant/year4
https://github.com/utkarshpant/year4
b4de2fc01c10502f562ee077b95d145805b819fc
f4c5bbc26ae8a3b085a9801c7310804e5917479e
d636bc00425cb597f6322476d129c020d96aeff6
refs/heads/master
2020-11-24T04:41:56.841517
2020-03-17T18:18:13
2020-03-17T18:18:13
227,969,634
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4684210419654846, "alphanum_fraction": 0.4894736707210541, "avg_line_length": 18.100000381469727, "blob_id": "483afd886f678eba10c0bdd79dd50238edf18c56", "content_id": "a204f348c6cb1aa6bec6aa0b85bb362703601351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 190, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/misc/misc_1.cpp", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "//testing a for loop\n#include <iostream>\n\nint main() {\n std::cout << \"Entered main.\" << std::endl;\n for (int a = 10; a >= 4; a--) {\n std::cout << \"hello.\";\n }\n return 0;\n}" }, { "alpha_fraction": 0.5383347272872925, "alphanum_fraction": 0.5490519404411316, "avg_line_length": 19.931034088134766, "blob_id": "76c6fa39a520ea3ada0bd030abde986203a97a74", "content_id": "2504f31466778d01d0f898b1daae5b12d6cc37a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1213, "license_type": "no_license", "max_line_length": 67, "num_lines": 58, "path": "/ss/password.cpp", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "//exp 6 ss \n#include <iostream>\n#include <regex>\n#include <string>\n#include <conio.h>\n\nclass Password {\n protected:\n //members\n std::string pass_input;\n \n\n int pass_flag;\n //methods\n \n\n public:\n //methods\n void record_input(std::string message);\n void validate_input();\n \n //constructors\n Password() {\n pass_flag = 0;\n pass_input = \"\";\n }\n};\n\nint main() {\n std::string input;\n Password newPassword;\n newPassword.record_input(\"Enter a password:\\t\");\n newPassword.validate_input();\n return 0;\n}\n\nvoid Password::record_input(std::string message) {\n std::cout << message;\n std::string input;\n std::cin >> input;\n pass_input = input;\n}\n\nvoid Password::validate_input() {\n int len_flag = 0, cap_flag = 0, num_flag = 0, sp_char_flag = 0;\n std::regex 
cap_regex(\"([0-9]+)|([a-d]+)\");\n if (pass_input.length() >= 8 && pass_input.length() <= 26) {\n len_flag = 1;\n std::cout << \"Long enough\" << std::endl;\n } else {\n std::cout << \"Too short\" << std::endl;\n }\n if (int la = std::regex_search(pass_input, cap_regex)) {\n std::cout << la << std::endl;\n } else {\n std::cout << la;\n }\n}" }, { "alpha_fraction": 0.5075922012329102, "alphanum_fraction": 0.5162689685821533, "avg_line_length": 21, "blob_id": "f5131f6605a6ed194c862834d1851e4806e2133a", "content_id": "4fee5ddf885368b7f3ff66ed7ef4f934ccf39b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 461, "license_type": "no_license", "max_line_length": 69, "num_lines": 21, "path": "/ss/buffer.cpp", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdlib>\n#include <cstring>\n\nint main() {\n char buff[1];\n std::cout << \"Enter password:\\n\" << std::endl;\n gets(buff);\n int pass = 0;\n if (strcmp(buff, \"a\")) {\n std::cout << \"Incorrect password!\" << std::endl;\n } else {\n std::cout << \"Correct password!\" << std::endl;\n pass = 1;\n }\n\n if (pass) {\n std::cout << \"The password state was changed.\" << std::endl; \n }\n return 0;\n}" }, { "alpha_fraction": 0.6944046020507812, "alphanum_fraction": 0.7044476270675659, "avg_line_length": 21.483871459960938, "blob_id": "4ce95227b2f06800419cc45a9411903df3568b64", "content_id": "d1ccad0636a8807d47d2305382b4abec792f3ed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 68, "num_lines": 31, "path": "/ss/vernam_cyph.py", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "import random\nletters = \"abcdefghijklmnopqrstuvwxyz\"\n\nmessage = input(\"Enter message to encrypt:\\t\")\ncipher = \"\"\n\n\n#generating random key\nkey_elements = [letters[random.randint(0,25)] for letter 
in message]\n\nkey = \"\".join(key_elements)\n\nprint(\"Generated key:\\t\", key)\n\n#encryption process\nfor i in range(len(message)):\n cipherval = letters.index(message[i]) + letters.index(key[i])\n cipher += letters[cipherval % 26]\n\nprint(\"Encrypted Message:\\t\", cipher)\n\noriginal = \"\"\n\n#decryption\nfor i in range(len(cipher)):\n cipherval = letters.index(cipher[i]) - letters.index(key[i])\n\n original += letters[cipherval % 26]\n\nif original==message:\n print(\"Decrypted message matches original.\")\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 30.5, "blob_id": "c133bd0396bcf92d31e05101a021e39eb4d55ff0", "content_id": "8940f21ae7579ee6eca11010bc2bec81567c8ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/README.md", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "# year4\nrepository for all code written in 4th year coursework\n" }, { "alpha_fraction": 0.5388272404670715, "alphanum_fraction": 0.5562599301338196, "avg_line_length": 13.318181991577148, "blob_id": "280398b66203285f2571d988f64e0354c3a3b911", "content_id": "fa1e63d097c7954de2ea26b9bba730246e408b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 46, "num_lines": 44, "path": "/ss/rsa.py", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "import random\nimport math\n\np = 11\nq = 3\n\n# //multiplication of two large prime numbers;\n# //first part of public key;\nn = int(p * q)\n\n# second part of public key\nphi = int((p - 1) * (q - 1))\nprint(\"phi = \", phi)\nexp = 2\n\nwhile (exp < phi):\n if (math.gcd(exp, phi) == 1):\n break\n else:\n exp += 1\n\nprint(\"exp = \", exp)\n# finally\npublic_key = (n, exp)\n\n# private key\n\nk = 2\nd = 
7\nprint(\"d = \", d)\n\nprivate_key = (d, n)\n\nmessage = 7\nprint(\"message:\\t\", message)\n# encryption\na = message ** exp\nc = int(a % n)\nprint(\"cipher text:\\t\", c)\n\nb = c ** d\nprint(\"b = \", b)\ndec = int(b % n)\nprint(\"decrypted text:\\t\", dec)\n\n" }, { "alpha_fraction": 0.540229856967926, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 14.727272987365723, "blob_id": "4bca87798c126d50148aae93765d6e79fb4f090c", "content_id": "2dbc7439eedeead9cd93a05c3a404df6b1fee240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/ss/kerberos_exp_8.py", "repo_name": "utkarshpant/year4", "src_encoding": "UTF-8", "text": "#Hullo\n\nusers_db = {\n 'user1': 'password1',\n 'user2': 'password2'\n}\n\nclass Client:\n\n def __init__(self, str: username, str: password):\n self.us = username\n\n" } ]
7
jxu43/replication-mbpo
https://github.com/jxu43/replication-mbpo
457f9b31f33c2ae333f0bcc2d0123ccff22d5ca7
06c46451add42d9a736df95f370de61ca2f7e334
d89e39582172e356c1f4fdd5b5cbbde21a70bf27
refs/heads/master
2020-03-31T14:01:06.168961
2020-02-25T21:29:27
2020-02-25T21:29:27
221,546,337
22
3
null
null
null
null
null
[ { "alpha_fraction": 0.6019799113273621, "alphanum_fraction": 0.6217787265777588, "avg_line_length": 40.05806350708008, "blob_id": "442cfbdea202f3ceca2e316a0bb6f4110a17e141", "content_id": "c861df7f46274d7a05e742ce41b10f5f0b37669b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6364, "license_type": "no_license", "max_line_length": 140, "num_lines": 155, "path": "/model.py", "repo_name": "jxu43/replication-mbpo", "src_encoding": "UTF-8", "text": "import torch\ntorch.set_default_tensor_type(torch.cuda.FloatTensor)\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\nimport gzip\n\ndevice = torch.device('cuda')\n\nnum_train = 60000 # 60k train examples\nnum_test = 10000 # 10k test examples\ntrain_inputs_file_path = './MNIST_data/train-images-idx3-ubyte.gz'\ntrain_labels_file_path = './MNIST_data/train-labels-idx1-ubyte.gz'\ntest_inputs_file_path = './MNIST_data/t10k-images-idx3-ubyte.gz'\ntest_labels_file_path = './MNIST_data/t10k-labels-idx1-ubyte.gz'\n\nBATCH_SIZE = 100\n\nclass Game_model(nn.Module):\n def __init__(self, state_size, action_size, reward_size, hidden_size=200, learning_rate=1e-2):\n super(Game_model, self).__init__()\n self.hidden_size = hidden_size\n self.nn1 = nn.Sequential(\n nn.Linear(state_size + action_size, hidden_size),\n Swish()\n )\n self.nn2 = nn.Sequential(\n nn.Linear(hidden_size, hidden_size),\n Swish()\n )\n self.nn3 = nn.Sequential(\n nn.Linear(hidden_size, hidden_size),\n Swish()\n )\n self.nn4 = nn.Sequential(\n nn.Linear(hidden_size, hidden_size),\n Swish()\n )\n\n self.output_dim = state_size + reward_size\n # Add variance output\n self.nn5 = nn.Linear(hidden_size, self.output_dim * 2)\n\n self.max_logvar = Variable(torch.ones((1, self.output_dim)).type(torch.FloatTensor) / 2, requires_grad=True).to(device)\n self.min_logvar = Variable(-torch.ones((1, self.output_dim)).type(torch.FloatTensor) * 10, 
requires_grad=True).to(device)\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)\n\n def forward(self, x):\n nn1_output = self.nn1(x)\n nn2_output = self.nn2(nn1_output)\n nn3_output = self.nn3(nn2_output)\n nn4_output = self.nn4(nn3_output)\n nn5_output = self.nn5(nn4_output)\n\n mean = nn5_output[:, :self.output_dim]\n\n logvar = self.max_logvar - F.softplus(self.max_logvar - nn5_output[:, self.output_dim:])\n logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)\n\n return mean, torch.exp(logvar)\n\n def loss(self, mean, logvar, labels, inc_var_loss=True):\n inv_var = torch.exp(-logvar)\n if inc_var_loss:\n mse_loss = torch.mean(torch.pow(mean - labels, 2) * inv_var)\n var_loss = torch.mean(logvar)\n total_loss = mse_loss + var_loss\n else:\n mse_loss = nn.MSELoss()\n total_loss = mse_loss(input=logits, target=labels)\n return total_loss\n\n def train(self, loss):\n self.optimizer.zero_grad()\n loss += 0.01 * torch.sum(self.max_logvar) - 0.01 * torch.sum(self.min_logvar)\n loss.backward()\n self.optimizer.step()\n\nclass Ensemble_Model():\n def __init__(self, network_size, elite_size, state_size, action_size, reward_size=1, hidden_size=200):\n self.network_size = network_size\n self.elite_size = elite_size\n self.model_list = []\n self.state_size = state_size\n self.action_size = action_size\n self.reward_size = reward_size\n self.elite_model_idxes = []\n for i in range(network_size):\n self.model_list.append(Game_model(state_size, action_size, reward_size, hidden_size))\n\n def train(self, inputs, labels, batch_size=256):\n for start_pos in range(0, inputs.shape[0], batch_size):\n input = torch.from_numpy(inputs[start_pos : start_pos + batch_size]).float().to(device)\n label = torch.from_numpy(labels[start_pos : start_pos + batch_size]).float().to(device)\n losses = []\n for model in self.model_list:\n mean, logvar = model(input)\n loss = model.loss(mean, logvar, label)\n model.train(loss)\n losses.append(loss)\n\n 
sorted_loss_idx = np.argsort(losses)\n self.elite_model_idxes = sorted_loss_idx[:self.elite_size].tolist()\n\n def predict(self, inputs, batch_size=1024):\n #TODO: change hardcode number to len(?)\n ensemble_mean = np.zeros((self.network_size, inputs.shape[0], self.state_size + self.reward_size))\n ensemble_logvar = np.zeros((self.network_size, inputs.shape[0], self.state_size + self.reward_size))\n for i in range(0, inputs.shape[0], batch_size):\n input = torch.from_numpy(inputs[i:min(i + batch_size, inputs.shape[0])]).float().to(device)\n for idx in range(self.network_size):\n pred_2d_mean, pred_2d_logvar = self.model_list[idx](input)\n ensemble_mean[idx,i:min(i + batch_size, inputs.shape[0]),:], ensemble_logvar[idx,i:min(i + batch_size, inputs.shape[0]),:] \\\n = pred_2d_mean.detach().cpu().numpy(), pred_2d_logvar.detach().cpu().numpy()\n\n return ensemble_mean, ensemble_logvar\n\n\nclass Swish(nn.Module):\n def __init__(self):\n super(Swish, self).__init__()\n\n def forward(self, x):\n x = x * F.sigmoid(x)\n return x\n\ndef get_data(inputs_file_path, labels_file_path, num_examples):\n with open(inputs_file_path, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * num_examples)\n data = np.frombuffer(buf, dtype=np.uint8) / 255.0\n inputs = data.reshape(num_examples, 784)\n\n with open(labels_file_path, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(num_examples)\n labels = np.frombuffer(buf, dtype=np.uint8)\n\n return np.array(inputs, dtype=np.float32), np.array(labels, dtype=np.int8)\n\ndef main():\n # Import MNIST train and test examples into train_inputs, train_labels, test_inputs, test_labels\n train_inputs, train_labels = get_data(train_inputs_file_path, train_labels_file_path, num_train)\n test_inputs, test_labels = get_data(test_inputs_file_path, test_labels_file_path, num_test)\n\n model = Ensemble_Model(5, 3, 5, 779, 5, 50)\n for i in range(0, 
10000, BATCH_SIZE):\n model.train(Variable(torch.from_numpy(train_inputs[i:i+BATCH_SIZE])), Variable(torch.from_numpy(train_labels[i:i+BATCH_SIZE])))\n model.predict(Variable(torch.from_numpy(test_inputs[:1000])))\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6149364113807678, "alphanum_fraction": 0.616525411605835, "avg_line_length": 39.17021179199219, "blob_id": "15b66f9e17696ff8570bb94717744f42c185fadf", "content_id": "70ac216057d583bcd334a1a86547eff1f19c9502", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "permissive", "max_line_length": 114, "num_lines": 47, "path": "/sac/replay_memory.py", "repo_name": "jxu43/replication-mbpo", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nfrom operator import itemgetter\n\nclass ReplayMemory:\n def __init__(self, capacity):\n self.capacity = capacity\n self.buffer = []\n self.position = 0\n\n def push(self, state, action, reward, next_state, done):\n if len(self.buffer) < self.capacity:\n self.buffer.append(None)\n self.buffer[self.position] = (state, action, reward, next_state, done)\n self.position = (self.position + 1) % self.capacity\n\n def push_batch(self, batch):\n if len(self.buffer) < self.capacity:\n append_len = min(self.capacity - len(self.buffer), len(batch))\n self.buffer.extend([None] * append_len)\n\n if self.position + len(batch) < self.capacity:\n self.buffer[self.position : self.position + len(batch)] = batch\n self.position += len(batch)\n else:\n self.buffer[self.position : len(self.buffer)] = batch[:len(self.buffer) - self.position]\n self.buffer[:len(batch) - len(self.buffer) + self.position] = batch[len(self.buffer) - self.position:]\n self.position = len(batch) - len(self.buffer) + self.position\n\n def sample(self, batch_size):\n if batch_size > len(self.buffer):\n batch_size = len(self.buffer)\n batch = random.sample(self.buffer, int(batch_size))\n state, action, 
reward, next_state, done = map(np.stack, zip(*batch))\n return state, action, reward, next_state, done\n\n def sample_all_batch(self, batch_size):\n idxes = np.random.randint(0, len(self.buffer), batch_size)\n batch = list(itemgetter(*idxes)(self.buffer))\n state, action, reward, next_state, done = map(np.stack, zip(*batch))\n return state, action, reward, next_state, done\n\n def return_all(self):\n return self.buffer\n\n def __len__(self):\n return len(self.buffer)\n" }, { "alpha_fraction": 0.6422287225723267, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 27.33333396911621, "blob_id": "43c4f8eb711f5b5513da12f03dec81a9a841c599", "content_id": "413dc03d524fa053c8836983690cbb34fad0a108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 341, "license_type": "no_license", "max_line_length": 108, "num_lines": 12, "path": "/README.md", "repo_name": "jxu43/replication-mbpo", "src_encoding": "UTF-8", "text": "## Overview\n\nReplication of [When to Trust Your Model: Model-Based Policy Optimization](https://arxiv.org/abs/1906.08253)\n\n## Dependencies\n\nMuJoCo 1.5 & MuJoCo 2.0\n\n## Usage\n> python mbpo.py --env-name 'Walker2d-v2' --num_epoch 300 --model_type 'tensorflow'\n\n> python mbpo.py --env-name 'Walker2d-v2' --num_epoch 300 --model_type 'pytorch'\n\n" } ]
3
malhotrachetan/py-finance-and-trading
https://github.com/malhotrachetan/py-finance-and-trading
48cc5cc403f2491918bad89998c685b4c3c35685
8fd7d85494b5a331d435bbe184999fd81ed615dd
cb233d7065a01cd3889b42c59ed1fa07b0fad694
refs/heads/master
2021-08-26T08:39:09.315140
2017-11-22T17:20:04
2017-11-22T17:20:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6975308656692505, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 39.25, "blob_id": "8cd671d6bf3b39f35fb622ae221bc8bd316308c4", "content_id": "3b7a8afce2b8cb8499f06d1db45477bcdf4e4a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 98, "num_lines": 4, "path": "/with_pandas.py", "repo_name": "malhotrachetan/py-finance-and-trading", "src_encoding": "UTF-8", "text": "import pandas_datareader as pdr\nimport datetime \naapl=pdr.get_data_yahoo('AAPL',start=datetime.datetime(2006,10,1),end=datetime.datetime(2012,1,1))\n#print(aapl)\n\n" }, { "alpha_fraction": 0.5742574334144592, "alphanum_fraction": 0.7326732873916626, "avg_line_length": 32.66666793823242, "blob_id": "f3fac6e61eef7266bde4d5d39028f63d151422d0", "content_id": "13f634a1005d97cea5d768b623914f351de9b231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 74, "num_lines": 3, "path": "/with_quandl.py", "repo_name": "malhotrachetan/py-finance-and-trading", "src_encoding": "UTF-8", "text": "import quandl\naapl=quandl.get(\"WIKI/AAPL\",start_date=\"2006-10-01\",end_date=\"2012-01-01\")\nprint(aapl)\n" }, { "alpha_fraction": 0.8529411554336548, "alphanum_fraction": 0.8529411554336548, "avg_line_length": 10, "blob_id": "593e6d5b159b7f3d0a0e7fdcbbd4cdef6fe6e8a6", "content_id": "6e336fa3d33d3440b145e97b4411581b6c25cf5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 34, "license_type": "no_license", "max_line_length": 17, "num_lines": 3, "path": "/requirements.txt", "repo_name": "malhotrachetan/py-finance-and-trading", "src_encoding": "UTF-8", "text": "pandas-datareader\nquandl\nzipline\n\n" } ]
3
King-Of-The-Cookies/SSIPWebapp
https://github.com/King-Of-The-Cookies/SSIPWebapp
dd817eda0bf732e7dca5f34169ae90b64348e9bd
20df62f42d1865edf183d64d09423de3e0f9928b
33931eeef126a5a7243b0c79e8d519ffc578b8c6
refs/heads/master
2021-05-10T21:56:50.527734
2018-01-20T16:49:45
2018-01-20T16:49:45
118,243,691
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7054451107978821, "alphanum_fraction": 0.7262590527534485, "avg_line_length": 39.14179229736328, "blob_id": "4152e23e1ab8bb3db44d7edccc3bc18a560d7a89", "content_id": "6f346bb11c4f14ff02d29cf67debe1a147ca9142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5381, "license_type": "no_license", "max_line_length": 213, "num_lines": 134, "path": "/webapp/main.py", "repo_name": "King-Of-The-Cookies/SSIPWebapp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 29 13:29:04 2017\n\n@author: guysimons\n\"\"\"\n\nimport os\nfrom flask import Flask, request, render_template, redirect\nimport numpy as np\nimport pandas as pd\nfrom scipy import spatial\n\n<<<<<<< HEAD\nos.chdir(\"/Users/guysimons/Documents/BISSmaster/smart service project 1/Final System/WebappRepo/SSIPWebapp/webapp\")\n=======\nos.chdir(\"/Users/lisaherzog/Google Drive/UM/Smart Services/Smart Service Project/WebApplication/SSIPWebapp/webapp\")\n>>>>>>> 512c2cff7c649d7ab6e9f871b9bee1e6901fd4d0\n\n\"\"\"\nThe function below computes the similarity between all columns in a dataframe. It first creates an empty dataframe to hold the \nsimilarity values. 
Then, it computes the similarity between all columns in a nested loop and stores the values in the empty dataframe.\nLastly the index and column names are set and the dataframe containing the similarity values is returned.\n\"\"\"\n\ndef ComputeCosSimilarityMatrix(inputDf):\n similarities = pd.DataFrame(np.zeros((inputDf.shape[1],inputDf.shape[1])))\n for i in range(0, inputDf.shape[1]):\n for j in range(0, inputDf.shape[1]):\n cosSim = 1 - spatial.distance.cosine(inputDf.iloc[:,i], inputDf.iloc[:,j])\n similarities.iloc[i,j] = cosSim\n similarities.columns = inputDf.columns\n similarities.index = inputDf.columns\n return similarities\n\n\"\"\"\nThe purpose of the Predict function is to make a prediction on what activity a the user will like next. It does this based on the similarity matrix and the userVector. The\nuserVector is a vector that records previous likes(1), dislikes(-1), and not rated (0) values. When the function is called it first checks if the userVector only contains 0s. If this is the case,\nit suggests a random activity as no predictions can be made for users that haven't rated anything. If the userVector does contain likes and dislikes\nthe activities for which rating already exist are stored. 
Then, the userVector is multiplied with the similarity matrix and the resulting values are divided by the sum of the similarities.\nSubsequently, activities that are already rated are removed and the activity with the highest score is returned.\n\"\"\"\n\ndef Predict(userVector, similarityMatrix):\n if userVector.sum()==0:\n randomActivity = np.random.randint(1, similarityMatrix.columns.shape[0])\n return similarityMatrix.columns.values[randomActivity]\n else:\n userVector = userVector.fillna(0)\n previouslyRated = userVector[(userVector > 0) | (userVector < 0)].index.values\n activityScores = similarityMatrix.dot(userVector.values.reshape((similarityMatrix.shape[1],))).div(similarityMatrix.sum(axis=1))\n activityScores = activityScores.drop(previouslyRated) \n return np.argmax(activityScores)\n\n\"\"\"\nThe userItemMatrix is a matrix of 1 (like) and -1 (dislike) values that is used to compute the similarity matrix. \n\"\"\"\n\nuserItemMatrix = pd.DataFrame(np.random.choice([1,-1], size = (1000,10)), columns = [\"Activity1\", \"Activity2\", \"Activity3\", \"Activity4\", \"Activity5\", \"Activity6\", \"Activity7\",\"Activity8\",\"Activity9\",\"Activity10\"])\n\n\"\"\"\nThe next step is to normalize the user vectors in the userItemMatrix. The purpose of this is to make sure that users with many ratings (those that like everything), contribute less to any individual rating. \nTo do this we square and sum the items in the user vector, to then take the square root which results in the magintude. \nThen, all user values are divided by the respective user's magnitude. 
\n\"\"\"\n\nmagnitude = np.sqrt(np.square(userItemMatrix[userItemMatrix==1]).sum(axis=1))\nuserItemMatrix[userItemMatrix==1] = userItemMatrix[userItemMatrix==1].divide(magnitude, axis='index')\nuserItemMatrix = userItemMatrix.fillna(0)\n\n\"\"\"\nCall the ComputeCosSimilarityMatrix function with the userItemMatrix as input.\n\"\"\"\n\nsimilarityMatrix = ComputeCosSimilarityMatrix(userItemMatrix)\n\n############WEB APPLICATION ROUTING##################\napp = Flask(__name__)\nrating = None\ncurrentActivity = None\n\n\n\"\"\"\nGenerate new userVector for a user that opens the application for the first time.\n\"\"\"\n\nuserVector = pd.Series(np.zeros((similarityMatrix.shape[1])), index=similarityMatrix.columns.values)\n\[email protected](\"/\")\ndef index():\n return render_template(\"main.html\")\n\[email protected](\"/home\")\ndef home():\n<<<<<<< HEAD\n return render_template(\"home.html\")\n\[email protected](\"/test\")\ndef test():\n return render_template(\"test.html\")\n=======\n return render_template(\"home.html\")\n\[email protected](\"/A1-food\")\ndef A1food():\n return render_template(\"A1-food.html\")\n>>>>>>> 512c2cff7c649d7ab6e9f871b9bee1e6901fd4d0\n\[email protected](\"/chooseactivity\")\ndef chooseactivity():\n \n \"\"\"\n Call Predict function.\n \"\"\"\n \n predictedactivity = Predict(userVector ,similarityMatrix)\n global currentActivity\n currentActivity = predictedactivity\n return render_template(\"chooseactivity.html\", predictedactivity = predictedactivity)\n\[email protected]('/submitForm', methods=['POST'])\ndef catchResponse():\n target = request.form['likedActivity']\n global rating\n rating = int(target)\n global currentActivity\n global userVector\n userVector[currentActivity] = rating\n print(userVector) \n return redirect('/chooseactivity')\n\nif __name__ == \"__main__\":\n app.run(port = 5001)\n\n\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 13.5, "blob_id": 
"74197688645a0033b7e55a10f5cf85b41fa1972f", "content_id": "22e1319040892594a08490fe81b7eb89aee12bff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 29, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "King-Of-The-Cookies/SSIPWebapp", "src_encoding": "UTF-8", "text": "# SSIPWebapp\nwebapp for ssip\n" } ]
2
Antrixauras/Music_Player_with_GUI
https://github.com/Antrixauras/Music_Player_with_GUI
33925c12c36b2199398446f5e2f14b916b83ca38
c8a17c7ac2ee21b600ea2bbfa6e630bd3e4a13a7
7632aaad7b66aabeca8004a725802fe7f3be8a4f
refs/heads/master
2023-04-10T13:26:07.475744
2021-04-18T03:26:38
2021-04-18T03:26:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5713592171669006, "alphanum_fraction": 0.5914239287376404, "avg_line_length": 30.217172622680664, "blob_id": "aaababe7a48537ad6ab093cd73137cebebfa971c", "content_id": "d3dfdb3452a7079bc3b68dd029edb1163bbde8b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6180, "license_type": "no_license", "max_line_length": 138, "num_lines": 198, "path": "/Music_player.py", "repo_name": "Antrixauras/Music_Player_with_GUI", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom pygame import mixer\nimport tkinter.messagebox\nfrom tkinter import filedialog\nfrom mutagen.mp3 import MP3\nimport os\n\nroot = Tk()\nroot.title(\"Music Player\")\nroot.iconbitmap('music.ico')\nroot.geometry('550x670')\nroot.resizable(0,0)\n\nmixer.init()\n\n\ndef on_closing():\n from tkinter import messagebox\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n root.destroy()\n # cv2.destroyAllWindows()\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\ndef info():\n tkinter.messagebox.showinfo('About us','This is a music player which works on your choice!!')\n\ndef play_music():\n try:\n global selected_song\n selected_song = listbox.curselection()\n selected_song = int(selected_song[0])\n mixer.music.load(listofsongs[selected_song])\n mixer.music.play()\n listbox.itemconfig(selected_song,bg = 'turquoise3')\n statusbar['text'] = 'Playing '+ os.path.basename(os.path.basename(listofsongs[selected_song]))\n audio = MP3(listofsongs[selected_song])\n len = audio.info.length\n min, sec = divmod(len, 60)\n min = round(min)\n sec = round(sec)\n timeformat = '{:02d}:{:02d}'.format(min, sec)\n show_length['text'] = 'Length -' + ' ' + timeformat\n\n except Exception as e:\n print(e)\n tkinter.messagebox.showerror('Error','No file chosen!!')\n\ndef stop_music():\n mixer.music.stop()\n statusbar['text'] = 'Music stopped'\n\ndef set_vol(val):\n volume=int(val)/100\n mixer.music.set_volume(volume)\n\nlistofsongs = 
[]\ndef add_to_playlist(f):\n global listofsongs\n global listbox\n global index\n index = 0\n os.chdir(f)\n for i in os.listdir(f):\n if i.endswith('.mp3'):\n filename = os.path.basename(i)\n file = os.path.realpath(i)\n listbox.insert(index,filename)\n listofsongs.append(file)\n index+=1\n mixer.music.load(listofsongs[0])\n mixer.music.play()\n statusbar['text'] = 'Playing music '+os.path.basename(listofsongs[0])\n audio = MP3(listofsongs[0])\n len = audio.info.length\n min, sec = divmod(len, 60)\n min = round(min)\n sec = round(sec)\n timeformat = '{:02d}:{:02d}'.format(min, sec)\n show_length['text'] = 'Length -' + ' ' + timeformat\n listbox.itemconfig(0, bg = 'turquoise3')\n\n\npause = True\ndef pause_music():\n global pause\n if pause:\n mixer.music.pause()\n statusbar['text'] = 'Music paused'\n pause = False\n else:\n mixer.music.unpause()\n statusbar['text'] = 'Music unpaused'\n pause = True\n\n# for menu bar .......................................................................................\nmenubar = Menu(root)\nroot.config(menu = menubar)\n\nsubMenu = Menu(menubar,tearoff = 0)\nmenubar.add_cascade(label = 'File',menu = subMenu)\nmenubar.add_cascade(label = 'About',command = info)\nsubMenu.add_command(label = 'Exit',command = root.destroy)\n\n\n# Root ..............................................................................................\n\ntext = Label(root)\ntext.pack(fill = X)\n\n\n# Top Frame ...........................................................................................\ntop_frame = Frame(root)\ntop_frame.pack()\n\n\nlistbox = Listbox(top_frame,selectmode=ACTIVE,width=90,height=20,fg='white',bg = 'black',font = ('times',10),relief = SUNKEN)\nlistbox.pack(fill = X)\n\nsb = Scrollbar(root,orient = 'vertical')\nsb.configure(command = listbox.yview)\nsb.pack(side = 'right',fill = 'y')\nlistbox.configure(yscrollcommand = sb.set)\n\n\n\nshow_length = Label(root,text = \"Length - --:-- \",font = ('times',13))\n# show_length.grid(row = 
1,column = 0,pady = 10)\nshow_length.place(x = 10,y = 370)\n\n\n# Lowest Frame ...............................................................................................\n\nlowest_frame = Frame(root)\nlowest_frame.pack()\n\ndef add_button():\n file1 = filedialog.askopenfilename()\n print(file1)\n file2 = os.path.basename(file1)\n listbox.insert(0,file2)\n listofsongs.insert(0,file1)\n\nadd_button = Button(lowest_frame,text = 'ADD SONG',bg = 'yellow',fg = 'black',width = 10,font = ('times',10,'bold'),command = add_button)\nadd_button.pack(side = LEFT,padx = 30,pady = 40)\n\ndef del_song():\n deletedsong = listbox.curselection()\n deletedsong = int(deletedsong[0])\n listbox.delete(deletedsong)\n listofsongs.remove(listofsongs[deletedsong])\n print(listofsongs)\n\ndel_button = Button(lowest_frame,text = 'DEL SONG',bg = 'yellow',fg = 'black',width = 10,font = ('times',10,'bold'),command = del_song)\ndel_button.pack(side = LEFT,pady = 40)\n\n\n# Middle Frame...........................................................................................\n\nfrom PIL import Image,ImageTk\n\nmiddle_frame = Frame(root)\nmiddle_frame.pack(pady = 10)\n\nimg1 = ImageTk.PhotoImage(Image.open('play_button.png'))\nimage1 = Button(middle_frame,image = img1,activebackground = 'black',command = play_music)\nimage1.grid(row = 0,column = 0,padx = 10)\n\nimg3 = ImageTk.PhotoImage(Image.open('pause_button.png'))\nimage3 = Button(middle_frame,image = img3,activebackground = 'black',command = pause_music)\nimage3.grid(row = 0,column = 1,padx = 10)\n\nimg2 = ImageTk.PhotoImage(Image.open('stop_button.png'))\nimage2 = Button(middle_frame,image = img2,activebackground = 'black',command = stop_music)\nimage2.grid(row = 0,column = 2,padx = 10)\n\n# Bottom Frame ..................................................................................................\n\nbottom_frame = Frame(root)\nbottom_frame.pack()\n\nscale1 = Scale(bottom_frame,from_ =0,to = 100,orient = HORIZONTAL,command = 
set_vol,resolution = 10)\nscale1.set(70)\nscale1.grid(row = 0,column = 3)\n# mixer.music.set_volume(0.7)\n\nvolume = Label(bottom_frame,text = 'VOLUME',font = ('times',10,'bold'))\nvolume.grid(row = 1,column = 3)\n\n# Root ..................................................................................................\n\nstatusbar = Label(root,text = 'Play music',relief = SUNKEN,anchor = W,bg = 'black',fg = 'white')\nstatusbar.pack(side = BOTTOM,fill = X)\n\nglobal filename_path\nfilename_path = filedialog.askdirectory()\nadd_to_playlist(filename_path)\n\nroot.mainloop()" }, { "alpha_fraction": 0.7170923352241516, "alphanum_fraction": 0.7210215926170349, "avg_line_length": 19.360000610351562, "blob_id": "50cd30b41a6993adb7d8bca21feaa998f1fd7142", "content_id": "5f15deb25b22589c0faf4ffe2b2cab295ff68044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 520, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/README.md", "repo_name": "Antrixauras/Music_Player_with_GUI", "src_encoding": "UTF-8", "text": "# Music_Player_GUI ๐ŸŽถ๐Ÿ‘ฉโ€๐Ÿ’ป\nPlay music from playlist of your choice\n\n## Code Requirements\n- Tkinter(available in python)\n- pygame (`pip install pygame`)\n- mutagen (`pip install mutagen`)\n\n## About\nA simple GUI of Music Player which play songs of the selected playlist.\n\n## Functions\n- Play song\n- Pause song\n- stop song\n- Add song\n- Delete song\n- Volume controller\n- Length of song\n\n### --> Play music\n<img src=\"musicplayer.png\">\n\n## Note\nYou'll need Python 3.6 as project interpreter in case working on Pycharm.\n" } ]
2
jiaoyiping630/BMSG-GAN
https://github.com/jiaoyiping630/BMSG-GAN
905b40c850a9abd40f244eb0b9e82f19942f8a9d
32ff7fb1bf89fede5a97121fe5d3c04d9cb0a92f
aceea8f98cc2af641bc641c15efa0fafc3d6f3dc
refs/heads/master
2020-12-05T20:21:19.770719
2020-01-27T11:23:15
2020-01-27T11:23:15
232,236,117
0
0
null
2020-01-07T03:38:05
2020-01-04T16:15:24
2019-07-24T17:51:47
null
[ { "alpha_fraction": 0.5398229956626892, "alphanum_fraction": 0.5523598790168762, "avg_line_length": 32.48147964477539, "blob_id": "b2fb94edb0513c76f7d7b20660e638e4cf56e252", "content_id": "02e5a5284c42217a5dde4ed8c5b166edf1ef2784", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2798, "license_type": "permissive", "max_line_length": 88, "num_lines": 81, "path": "/sourcecode/extract.py", "repo_name": "jiaoyiping630/BMSG-GAN", "src_encoding": "UTF-8", "text": "def main():\n import os\n import numpy as np\n import torch as th\n from torch.backends import cudnn\n cudnn.benchmark = True\n device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n\n from pinglib.files import get_file_list, create_dir\n from pinglib.utils import save_variables\n from PIL import Image\n\n image_folder = r\"D:\\Projects\\anomaly_detection\\datasets\\Camelyon\\test_negative\"\n save_path = r\"D:\\Projects\\anomaly_detection\\BMSG_GAN_test_neg.pkl\"\n model_path=r\"D:\\Projects\\anomaly_detection\\progresses\\MSG-GAN\\Models\\GAN_DIS_73.pth\"\n\n '''-----------------ๅปบ็ซ‹ๆ•ฐๆฎ้›†ๅ’Œๆ•ฐๆฎ่ฝฝๅ…ฅๅ™จ----------------'''\n\n from torch.utils.data import Dataset\n from torchvision.transforms import ToTensor, Resize, Compose, Normalize\n\n class Dataset4extract(Dataset):\n def __init__(self, image_paths):\n self.image_paths = image_paths\n self.transform = Compose([\n ToTensor(),\n Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n img = Image.open(self.image_paths[idx])\n\n img = self.transform(img)\n\n if img.shape[0] == 4:\n # ignore the alpha channel\n # in the image if it exists\n img = img[:3, :, :]\n return img\n\n image_paths = get_file_list(image_folder, ext='jpg')\n dataset = Dataset4extract(image_paths)\n print(\"Total number of images in the dataset:\", len(dataset))\n\n from torch.utils.data import DataLoader\n 
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)\n\n '''-----------------ๅปบ็ซ‹ๆจกๅž‹----------------'''\n from MSG_GAN.GAN import MSG_GAN\n depth = 7\n msg_gan = MSG_GAN(depth=depth,\n latent_size=512,\n use_eql=True,\n use_ema=True,\n ema_decay=0.999,\n device=device)\n\n msg_gan.dis.load_state_dict(th.load(model_path))\n\n '''-----------------่ฟ›่กŒ่ฏ„ไผฐ----------------'''\n features = []\n from torch.nn.functional import avg_pool2d\n\n for (i, batch) in enumerate(dataloader):\n # ่Žทๅ–ๅคšๅˆ†่พจ็އ็š„ๅ›พๅƒ่พ“ๅ…ฅ\n images = batch.to(device)\n\n images = [images] + [avg_pool2d(images, int(np.power(2, i)))\n for i in range(1, depth)]\n images = list(reversed(images))\n\n # ๆŠŠ่ฟ™ไบ›ๅ›พๅƒไธข็ป™ๆจกๅž‹\n feature = msg_gan.extract(images)\n features.append(feature.detach().cpu().numpy())\n\n '''-----------------ไฟๅญ˜็ป“ๆžœ----------------'''\n features = np.concatenate(features, axis=0)\n save_variables([features], save_path)\n" } ]
1
gnterrell/hello-world
https://github.com/gnterrell/hello-world
76af80963e9cfc142cbe4abc3e074e8be1b8e038
b2699a1c21d9543bcca416c8b992af560505f18a
992529646d807c812ac25319ad49fe0e96d0e75c
refs/heads/master
2021-05-01T14:54:27.567460
2018-03-03T20:49:46
2018-03-03T20:49:46
121,026,072
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6104166507720947, "alphanum_fraction": 0.762499988079071, "avg_line_length": 38.58333206176758, "blob_id": "71e4fad344b6ee44cd97f7de3808ffc66cd38100", "content_id": "651080659d4938e830fe674711b43802e20395d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 480, "license_type": "no_license", "max_line_length": 103, "num_lines": 12, "path": "/README.md", "repo_name": "gnterrell/hello-world", "src_encoding": "UTF-8", "text": "# Deep Learning Feed Forward Neural Network\nBBDS Deep Learning \n# Here is an example output from a training \nStep Count:1000 \nTraining accuracy: 0.7799999713897705 loss:0.5227694511413574 \nTest accuracy: 0.8349999785423279 loss: 0.46897435188293457 \n \nAccuracy \n![alt text](https://raw.githubusercontent.com/gnterrell/hello-world/master/Accuracy.PNG \"accuracy\") \n \nLoss \n![alt text](https://raw.githubusercontent.com/gnterrell/hello-world/master/Loss.PNG \"loss\") \n" }, { "alpha_fraction": 0.6213096976280212, "alphanum_fraction": 0.6642512083053589, "avg_line_length": 37.020408630371094, "blob_id": "86cfb1e42d9bd42a4236db6bf1c3265fe632e92b", "content_id": "f7b3f96c55ce36a7ea33fae119b1457ea6c49081", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3726, "license_type": "no_license", "max_line_length": 131, "num_lines": 98, "path": "/censusData.py", "repo_name": "gnterrell/hello-world", "src_encoding": "UTF-8", "text": "\"\"\"\nProject 1\n\nAt the end you should see something like this\nStep Count:1000\nTraining accuracy: 0.8999999761581421 loss: 0.42281264066696167\nTest accuracy: 0.8199999928474426 loss: 0.4739704430103302\n\nplay around with your model to try and get an even better score\n\"\"\"\n\nimport tensorflow as tf\nimport dataUtils\n\ntraining_data, training_labels = dataUtils.readData(\"project1trainingdata.csv\")\ntest_data, test_labels = 
dataUtils.readData(\"project1testdata.csv\")\n\n\n# Build tensorflow blueprint\n## Tensorflow placeholder\ninput_placeholder = tf.placeholder(tf.float32, shape=[None, 113])\n## Neural network hidden layers\n# layer 1\nweight1 = tf.get_variable(\"weight1\", shape=[113, 150], initializer=tf.contrib.layers.xavier_initializer())\nbias1 = tf.get_variable(\"bias1\", shape=[150], initializer=tf.contrib.layers.xavier_initializer())\nhidden_layer_1 = tf.nn.dropout(tf.nn.relu(tf.matmul(input_placeholder, weight1) + bias1), keep_prob=0.5)\n\n# layer 2\nweight2 = tf.get_variable(\"weight2\", shape=[150, 125], initializer=tf.contrib.layers.xavier_initializer())\nbias2 = tf.get_variable(\"bias2\", shape=[125], initializer=tf.contrib.layers.xavier_initializer())\nhidden_layer_2 = tf.nn.dropout(tf.nn.relu(tf.matmul(hidden_layer_1, weight2) + bias2), keep_prob=0.5)\n\n# layer 3\nhidden_layer_3 = tf.nn.dropout(tf.layers.dense(hidden_layer_2, 100, activation=tf.nn.relu), keep_prob=0.5)\n\n## Logit layer\nlogits = tf.nn.softmax(tf.layers.dense(hidden_layer_3, 2, activation=None))\n\n## label placeholder\nlabel_placeholder = tf.placeholder(tf.float32, shape=[None, 2])\n\n## loss function\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label_placeholder, logits=logits))\n## backpropagation algorithm\ntrain = tf.train.AdamOptimizer().minimize(loss)\n\naccuracy = dataUtils.accuracy(logits, label_placeholder)\n\n# summaries\ntf.summary.scalar('accuracy', accuracy)\ntf.summary.scalar('loss', loss)\nmerged = tf.summary.merge_all()\n\nsaver = tf.train.Saver()\n\n## Make tensorflow session\nwith tf.Session() as sess:\n summary_writer = tf.summary.FileWriter(\"/tmp/project1\",\n sess.graph)\n\n ## Initialize variables\n sess.run(tf.global_variables_initializer())\n\n\n step_count = 0\n while True:\n step_count += 1\n\n batch_training_data, batch_training_labels = dataUtils.getBatch(data=training_data, labels=training_labels, batch_size=100)\n\n training_accuracy, 
training_loss, logits_output, _ = \\\n sess.run([accuracy, loss, logits, train],\n feed_dict={input_placeholder: batch_training_data,\n label_placeholder: batch_training_labels})\n\n # every 10 steps check accuracy\n if step_count % 10 == 0:\n batch_test_data, batch_test_labels = dataUtils.getBatch(data=test_data, labels=test_labels,\n batch_size=1000)\n test_accuracy, test_loss, summary_merged = sess.run([accuracy, loss, merged],\n feed_dict={input_placeholder: batch_test_data,\n label_placeholder: batch_test_labels})\n\n summary_writer.add_summary(summary_merged, step_count)\n\n #print(\"Logits {}\".format(logits_output))\n print(\"Step Count:{}\".format(step_count))\n print(\"Training accuracy: {} loss:{}\".format(training_accuracy, training_loss))\n print(\"Test accuracy: {} loss: {}\".format(test_accuracy, test_loss))\n\n\n if step_count % 100 == 0:\n save_path = saver.save(sess, \"/tmp/model{}.ckpt\".format(step_count))\n\n\n # stop training after 100 steps\n if step_count > 1000:\n break\n" } ]
2
marianp38/my-proyect-one
https://github.com/marianp38/my-proyect-one
88c75b68c63cef20fa48b2bd4d8f79ab8ef0379b
74b65b0f182cdc133ddaf5b8fb9142a329cbd89a
7ade6abd912c339e721dec0bcd8482ec7b7163d9
refs/heads/master
2023-07-02T19:26:58.113493
2021-08-09T14:45:57
2021-08-09T14:45:57
394,325,274
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4885542094707489, "alphanum_fraction": 0.516867458820343, "avg_line_length": 21.72602653503418, "blob_id": "942e68a9ce525bdd43acd16d2625c348a4532cb8", "content_id": "eb9c83024fe35127530602582494e9d7f0c1ea3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "no_license", "max_line_length": 81, "num_lines": 73, "path": "/assert.py", "repo_name": "marianp38/my-proyect-one", "src_encoding": "UTF-8", "text": "import unittest\n\n\nclass PruebaDeStandards(unittest.TestCase):\n\n def test_suma(self):\n a = 2 + 2\n b = 3 + 1\n self.assertEqual(a, b)\n\n def test_otra_suma(self):\n a = 5 + 1\n b = 8 + 20\n self.assertNotEqual(a, b)\n\n def test_algo_es_verdadero(self):\n a = 5+4\n b = 3+6\n self.assertTrue(a == b)\n\n def test_algo_mas_es_verdadero(self):\n self.assertTrue(3+3 == 4+2)\n\n def test_algo_es_falso(self):\n self.assertFalse(2+1 == 3+5)\n\n def test_otro_falso(self):\n self.assertFalse(3+5 == 2+4, 'Esto deberia ser falso')\n\n def test_algo_es_mayor(self):\n a = 5\n b = 3\n self.assertTrue(a > b)\n\n def test_otra_cosa_es_mayor(self):\n a = 5\n b = 2\n self.assertGreater(a, b, 'El primero debe ser mas grande que el segundo')\n\n def test_algo_es_mayor_o_igual(self):\n a = 5\n b = 4\n self.assertGreaterEqual(a, b)\n\n def test_algo_es_menor(self):\n a = 5\n b = 8\n self.assertLess(a, b)\n\n def test_algo_es_menor_o_igual(self):\n a = 4\n b = 4\n self.assertLessEqual(a, b)\n\n def test_comparar_listas(self):\n a = [1, 2, 'Fruta']\n b = [1, 2, 'Fruta']\n self.assertListEqual(a, b)\n\n def test_comparar_tuplas(self):\n a = (1, 2, 3)\n b = (1, 2, 3)\n self.assertTupleEqual(a, b)\n\n def test_comparar_diccionarios(self):\n a = {'id':1, 'Nombre':'Nelson', 'Apellido':'Perez'}\n b = {'id':1, 'Nombre':'Nelson', 'Apellido':'Perez'}\n self.assertDictEqual(a, b)\n \n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.6552217602729797, 
"alphanum_fraction": 0.6552217602729797, "avg_line_length": 32.238094329833984, "blob_id": "c4a763188342cd7dd8a0385038244d62c0ebf0f8", "content_id": "90a83ae6da89d45c043994ed26560b9ee8470c7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 84, "num_lines": 21, "path": "/pageCompra.py", "repo_name": "marianp38/my-proyect-one", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n\nclass PageCompra:\n\n\n def __init__(self, my_driver):\n self.driver = my_driver\n self.quantity_value = (By.XPATH, '//*[@id=\"quantity_wanted\"]')\n self.button_value = (By.CLASS_NAME, 'icon-plus')\n\n def enter_quantity(self, quantity):\n self.driver.find_element(*self.quantity_value).clear()\n self.driver.find_element(*self.quantity_value).send_keys(quantity)\n\n def click_button_value(self, quantity):\n for i in range(quantity):\n self.driver.find_element(*self.button_value).click()\n\n\n def return_value(self):\n return self.driver.find_element(*self.quantity_value).get_attribute('value')\n\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "d065b2a246f34fd3b11d3ff4517712cfb0379d2e", "content_id": "36f254e9c9d0fc0f7a62092138b87a4f5035c5ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "no_license", "max_line_length": 38, "num_lines": 2, "path": "/README.md", "repo_name": "marianp38/my-proyect-one", "src_encoding": "UTF-8", "text": "# my-proyect-one\nEste es mi primer proyecto de practica\n" }, { "alpha_fraction": 0.6464471220970154, "alphanum_fraction": 0.649046778678894, "avg_line_length": 35, "blob_id": "ec4301f0229b55ac0a49e7937ad2d2ec9f37ecb1", "content_id": "17e9184f6b896681c834b2d0cbf2e365b3392eb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1154, "license_type": "no_license", "max_line_length": 77, "num_lines": 32, "path": "/pageItems.py", "repo_name": "marianp38/my-proyect-one", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\n\nclass PageItems:\n\n def __init__(self, my_driver):\n self.no_results_banner = (By.XPATH, '//*[@id=\"center_column\"]/p')\n self.title_banner = (By.XPATH, '//*[@id=\"center_column\"]/h1/span[1]')\n self.orange_button = (By.ID, 'color_1')\n self.driver = my_driver\n self.order = (By.ID, 'selectProductSort')\n\n def return_no_element_text(self):\n return self.driver.find_element(*self.no_results_banner).text\n\n def return_section_title(self):\n return self.driver.find_element(*self.title_banner).text\n\n def click_orange_button(self):\n self.driver.find_element(*self.orange_button).click()\n\n def select_by_text(self, text):\n order = Select(self.driver.find_element(*self.order))\n order.select_by_visible_text(text)\n\n def select_by_value(self, text):\n order = Select(self.driver.find_element(*self.order))\n order.select_by_value(text)\n\n def select_by_index(self, text):\n order = Select(self.driver.find_element(*self.order))\n order.select_by_index(text)\n\n\n" } ]
4
hvardhanx/company-website
https://github.com/hvardhanx/company-website
f7272f9630d1c1592f07a637da3e8f5a222bf7f2
1f3486f07b2797d4798207dd0c68e4fa1c8b8027
261ecc9778efa68760935cef163b16ef4bf26608
refs/heads/master
2021-08-29T20:41:56.353270
2017-12-14T23:35:23
2017-12-14T23:35:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5578168630599976, "alphanum_fraction": 0.5578168630599976, "avg_line_length": 33.83871078491211, "blob_id": "5320fcd877f8ae4bf3b306709cf353f5bce17998", "content_id": "eb2b31e8d9ad069b5229f27591668f7bc3ee77fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 105, "num_lines": 31, "path": "/logic/emails/email_wrapper.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "from config import constants\nfrom util import sendgrid_wrapper as sgw\n\nBCC_RECIPIENTS = [sgw.Email('[email protected]', 'Founders')]\n\ndef send_email_msg(from_email, to_email, msg_subject, msg_category,\n msg_text, msg_html):\n\n try:\n categories = [msg_category]\n\n if 'localhost' in constants.HOST or 'pagekite' in constants.HOST:\n sgw.send_message(\n sender=from_email,\n recipients=[sgw.Email(constants.DEV_EMAIL, constants.DEV_EMAIL)],\n subject='DEV: ' + msg_subject,\n body_text=msg_text,\n body_html=msg_html,\n categories=categories)\n else:\n sgw.send_message(\n sender=from_email,\n recipients=[to_email],\n subject=msg_subject,\n body_text=msg_text,\n body_html=msg_html,\n bccs=BCC_RECIPIENTS,\n categories=categories)\n\n except Exception as e:\n sgw.notify_admins(\"Unable to send email message to \" + to_email.email + \" because \\n\\n\" + str(e))\n\n" }, { "alpha_fraction": 0.6908893585205078, "alphanum_fraction": 0.7017353773117065, "avg_line_length": 24.63888931274414, "blob_id": "1c3d9d5738b38371808721fd66ed32e10218f496", "content_id": "bb08f4d8bc267b8532834c6797b11310c64650fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/views/web_views.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "from flask import redirect\nfrom flask import 
render_template\nfrom flask import request\n\nfrom app import app\nfrom config import constants\nfrom logic.emails import send_emails\n\n# force https on prod\[email protected]_request\ndef beforeRequest():\n if constants.HTTPS:\n if not request.url.startswith('https'):\n return redirect(request.url.replace('http', 'https', 1))\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/team')\ndef team():\n return render_template('team.html')\n\[email protected]('/whitepaper')\ndef whitepaper():\n return redirect('/static/docs/whitepaper_v2.pdf', code=302)\n\[email protected]('/product-brief')\ndef product_brief():\n return redirect('/static/docs/product_brief_v15.pdf', code=302)\n\[email protected]('/signup', methods=['POST','GET'])\ndef signup():\n email = request.args.get(\"email\")\n send_result = send_emails.send_welcome(email)\n return send_result" }, { "alpha_fraction": 0.5247410535812378, "alphanum_fraction": 0.7261219620704651, "avg_line_length": 18.311111450195312, "blob_id": "c28b822760ea2c068ee2e655f406a20d6af0e00f", "content_id": "c9f6641e426b2fc00b57eb61f093a9eec1e69caa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 869, "license_type": "no_license", "max_line_length": 97, "num_lines": 45, "path": "/requirements.txt", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "-e 
git+https://github.com/UnicycleLabs/apilib@ffebb0851aa41b012bb9277544617ec902be65a2#egg=apilib\nappier==1.7.39\nappnope==0.1.0\nbackports.shutil-get-terminal-size==1.0.0\ncertifi==2016.9.26\ncffi==1.9.1\nclick==6.6\ncryptography==1.5.3\ndecorator==4.0.10\nenum34==1.1.6\nFlask==0.11.1\nFlask-SQLAlchemy==2.1\ngnureadline==6.3.3\ngunicorn==19.7.1\nidna==2.1\nipaddress==1.0.17\nipython==5.1.0\nipython-genutils==0.1.0\nitsdangerous==0.24\nJinja2==2.8\nMarkupSafe==0.23\npath.py==8.2.1\npathlib2==2.1.0\npexpect==4.2.1\npickleshare==0.7.4\nprompt-toolkit==1.0.9\npsycopg2==2.7.3.2\nptyprocess==0.5.1\npyasn1==0.1.9\npycparser==2.17\npyflakes==1.3.0\nPygments==2.1.3\npyOpenSSL==16.2.0\npython-dateutil==2.6.0\npython-env==1.0.0\npython-http-client==3.0.0\nrequests==2.11.1\nsendgrid==5.3.0\nsimplegeneric==0.8.1\nsix==1.10.0\nSQLAlchemy==1.1.3\ntraitlets==4.3.1\nurllib3==1.19\nwcwidth==0.1.7\nWerkzeug==0.11.11\n" }, { "alpha_fraction": 0.6601226925849915, "alphanum_fraction": 0.6674846410751343, "avg_line_length": 28.14285659790039, "blob_id": "ab2384b0b4ada0808e4445fa0b2327b046280c86", "content_id": "ae0787635018d2d2d0a629a613c32f1b0ac26529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 82, "num_lines": 28, "path": "/logic/emails/send_emails.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "import re\n\nfrom flask import jsonify\n\nfrom config import universal\nfrom database import db, db_common, db_models\nfrom logic.emails import email_types\nfrom util import sendgrid_wrapper as sgw\n\nDEFAULT_SENDER = sgw.Email(universal.CONTACT_EMAIL, universal.BUSINESS_NAME)\n\ndef send_welcome(email):\n if not re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", email):\n return jsonify(\"Please enter a valid email address\")\n\n try:\n me = db_models.EmailList()\n me.email = email\n me.unsubscribed = False\n db.session.add(me)\n 
db.session.commit()\n except:\n return jsonify('You are already signed up!')\n\n to_email = sgw.Email(email, email)\n email_types.send_email_type('welcome', DEFAULT_SENDER, to_email)\n\n return jsonify('Thanks for signing up!')" }, { "alpha_fraction": 0.7264957427978516, "alphanum_fraction": 0.7264957427978516, "avg_line_length": 23.41666603088379, "blob_id": "6714cff9dba39e68db45f86ad2d5a5d7f2766776", "content_id": "b212acb432ecca7554a4e147763451855949747f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/config/constants.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "import os\nimport dotenv\n\ndotenv.load() or dotenv.load('.env')\n\nDEV_EMAIL = dotenv.get('DEV_EMAIL', default=None)\n\nDEBUG = dotenv.get('DEBUG', default=False)\n\nHOST = dotenv.get('HOST')\nHTTPS = dotenv.get('HTTPS', default=True)\n\nPROJECTPATH = dotenv.get('PROJECTPATH')\n\nFLASK_SECRET_KEY = dotenv.get('FLASK_SECRET_KEY')\n\nAPP_LOG_FILENAME = os.path.join(PROJECTPATH, 'app.log')\n\nSQLALCHEMY_DATABASE_URI = dotenv.get('DATABASE_URL')\n\nSENDGRID_API_KEY = dotenv.get('SENDGRID_API_KEY')\n\nTEMPLATE_ROOT = os.path.join(PROJECTPATH, 'templates')\nSTATIC_ROOT = os.path.join(PROJECTPATH, 'static')" }, { "alpha_fraction": 0.7231183052062988, "alphanum_fraction": 0.7311828136444092, "avg_line_length": 23.799999237060547, "blob_id": "38c64d534a63026d31ffd7d3cab1362c610d463e", "content_id": "ca4b7bfdbef3c42ec28491b508423fb6d8b35925", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/database/db_models.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "\nfrom decimal import *\n\nfrom sqlalchemy.orm import deferred\nfrom sqlalchemy.orm import 
relationship\n\nfrom database import db, db_common\n\nclass EmailList(db.Model):\n\t__tablename__ = 'email_list'\n\n\temail = db.Column(db.String(255), primary_key=True, autoincrement=False)\n\tunsubscribed = db.Column(db.Boolean(), unique=False)\n\n\tdef __str__(self):\n\t\treturn '%s' % (self.email)" }, { "alpha_fraction": 0.7237903475761414, "alphanum_fraction": 0.725806474685669, "avg_line_length": 34.42856979370117, "blob_id": "2946374f84d87d2d697a29659e58a4f6e0f498ee", "content_id": "c293ee3c424cad4c26170b6bc1586635239bf4b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 992, "license_type": "no_license", "max_line_length": 230, "num_lines": 28, "path": "/README.md", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "# originprotocol.com\n\nThis is a pretty simple bare bones Flask app with the source code for [originprotocol.com](https://www.originprotocol.com). The code is all `Python 2.7` and we use `Postgres` for the database (basically just for the mailing list).\n\nTo get started (we recommend doing this in an virtualenv):\n\n git clone https://github.com/OriginProtocol/company-website.git\n pip install -r requirements.txt\n \nWe should also mention that the app expects a `.env` file in your root directory that looks something like this:\n\n DEV_EMAIL = \"[email protected]\"\n DEBUG = True\n\n HTTPS = False\n\n PROJECTPATH = \"/\"\n\n FLASK_SECRET_KEY = putyoursupersecretkeyhere\n\n DATABASE_URL = postgresql://localhost/origin\n\n SENDGRID_API_KEY = putyoursupersecretkeyhere\n\n TEMPLATE_ROOT = os.path.join(PROJECTPATH, 'templates')\n STATIC_ROOT = os.path.join(PROJECTPATH, 'static')\n \nHit us up in the `eng-website` channel on [Slack](http://slack.originprotocol.com) if you need help.\n" }, { "alpha_fraction": 0.7012383937835693, "alphanum_fraction": 0.7012383937835693, "avg_line_length": 29.809524536132812, "blob_id": "40fe7d671b1b8e1dc760cec2c07c8b3c24763a3d", 
"content_id": "bbbad017b1acbebb8c204b5ab3d383a6423fe515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 81, "num_lines": 21, "path": "/logic/emails/email_types.py", "repo_name": "hvardhanx/company-website", "src_encoding": "UTF-8", "text": "from flask import render_template\n\nfrom config import universal\nfrom logic.emails import email_wrapper\nfrom util import sendgrid_wrapper as sgw\n\nEMAILS = {\n 'welcome': {\n 'subject': 'Welcome to Origin Protocol'\n }\n}\n\ndef send_email_type(email_type, from_email, to_email):\n msg_subject = EMAILS.get(email_type).get('subject')\n msg_category = email_type\n\n msg_text = render_template('email/%s.txt' % email_type, universal=universal)\n msg_html = render_template('email/%s.html' % email_type, universal=universal)\n\n email_wrapper.send_email_msg(from_email, to_email, \n msg_subject, msg_category, msg_text, msg_html)" } ]
8
enthalpychange/take-a-number
https://github.com/enthalpychange/take-a-number
e22999c8a5f7c8b6de091a544e50734819b5042e
34b9e274cc0a95191d10e674d3ae0732d7638d88
e230ac0d455e705104d25cd09e44382b1baef846
refs/heads/master
2021-10-12T06:30:11.042359
2020-03-03T03:56:03
2020-03-03T03:56:03
242,454,322
0
0
MIT
2020-02-23T04:10:36
2020-03-03T03:56:39
2021-09-22T18:40:46
Python
[ { "alpha_fraction": 0.7075679898262024, "alphanum_fraction": 0.7097722291946411, "avg_line_length": 34.81578826904297, "blob_id": "8c461a807e6799f85884a7f13f9f05656007c596", "content_id": "d30d6b2d2fcf6d95978739163dcfd253a46611d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "permissive", "max_line_length": 75, "num_lines": 38, "path": "/tan/identity/models.py", "repo_name": "enthalpychange/take-a-number", "src_encoding": "UTF-8", "text": "from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.db import models\n\n\nclass IdentityManager(BaseUserManager):\n\n def _create_identity(self, identifier, password=None, **kwargs):\n identifier = self.model.normalize_username(identifier)\n identity = self.model(identifier=identifier, **kwargs)\n identity.set_password(password)\n identity.save(using=self._db)\n return identity\n\n def create_user(self, identifier, password=None, **kwargs):\n kwargs.setdefault('is_staff', False)\n kwargs.setdefault('is_superuser', False)\n return self._create_identity(identifier, password, **kwargs)\n\n def create_superuser(self, identifier, password=None, **kwargs):\n kwargs.setdefault('is_staff', True)\n kwargs.setdefault('is_superuser', True)\n return self._create_identity(identifier, password, **kwargs)\n\n\nclass Identity(AbstractBaseUser, PermissionsMixin):\n identifier = models.CharField(max_length=256, unique=True)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n\n class Meta:\n verbose_name_plural = 'identities'\n\n objects = IdentityManager()\n\n USERNAME_FIELD = 'identifier'\n EMAIL_FIELD = 'identifier'\n REQUIRED_FIELDS = []\n" }, { "alpha_fraction": 0.6003236174583435, "alphanum_fraction": 0.6294498443603516, "avg_line_length": 22.769229888916016, "blob_id": "3613a39bb6d82e84c42a8036e41e92003a0b58da", 
"content_id": "d807ecb1232f53fe4123c6f1c9a065936b0851c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "permissive", "max_line_length": 79, "num_lines": 26, "path": "/config/settings/local.py", "repo_name": "enthalpychange/take-a-number", "src_encoding": "UTF-8", "text": "\"\"\"\nSettings for local development.\nDo not use these settings for production!\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\nfrom .base import * # noqa\n\nSECRET_KEY = 'secret key for local development only!'\n\nDEBUG = True\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n#\n# docker run -d -e \"POSTGRES_PASSWORD=postgres\" -p 127.0.0.1:5432:5432 postgres\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'HOST': 'localhost',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n 'PORT': '',\n }\n}\n" }, { "alpha_fraction": 0.6768016219139099, "alphanum_fraction": 0.6793213486671448, "avg_line_length": 38.686668395996094, "blob_id": "363b1b247a76de9b7845e427d0d1e3533805b2bf", "content_id": "f019ef78726631e7d56d92a56c007ee3ea57dc4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5955, "license_type": "permissive", "max_line_length": 107, "num_lines": 150, "path": "/tan/identity/admin.py", "repo_name": "enthalpychange/take-a-number", "src_encoding": "UTF-8", "text": "# Copyright (c) Django Software Foundation and individual contributors.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n\n# 3. Neither the name of Django nor the names of its contributors may be used\n# to endorse or promote products derived from this software without\n# specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.forms import AdminPasswordChangeForm\nfrom django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher\nfrom django.utils.translation import gettext\n\nfrom .models import Identity\n\n\nclass ReadOnlyPasswordHashWidget(forms.Widget):\n template_name = 'auth/widgets/read_only_password_hash.html'\n read_only = True\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n summary = []\n if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX):\n summary.append({'label': gettext('No password set.')})\n else:\n try:\n hasher = identify_hasher(value)\n except ValueError:\n 
summary.append({'label': gettext('Invalid password format or unknown hashing algorithm.')})\n else:\n for key, value_ in hasher.safe_summary(value).items():\n summary.append({'label': gettext(key), 'value': value_})\n context['summary'] = summary\n return context\n\n\nclass ReadOnlyPasswordHashField(forms.Field):\n widget = ReadOnlyPasswordHashWidget\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('required', False)\n super().__init__(*args, **kwargs)\n\n def bound_data(self, data, initial):\n # Always return initial because the widget doesn't\n # render an input field.\n return initial\n\n def has_changed(self, initial, data):\n return False\n\n\nclass IdentityCreationForm(forms.ModelForm):\n password1 = forms.CharField(label='Password', widget=forms.PasswordInput)\n password2 = forms.CharField(label='Confirm password', widget=forms.PasswordInput)\n\n class Meta:\n model = Identity\n fields = ('identifier',)\n\n def clean_password2(self):\n # Check that the two password entries match\n password1 = self.cleaned_data.get('password1')\n password2 = self.cleaned_data.get('password2')\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\"Passwords don't match\")\n return password2\n\n def save(self, commit=True):\n # Save the provided password in hashed format\n identity = super().save(commit=False)\n identity.set_password(self.cleaned_data['password1'])\n if commit:\n identity.save()\n return identity\n\n\nclass IdentityChangeForm(forms.ModelForm):\n password = ReadOnlyPasswordHashField(\n label=('Password'),\n help_text=(\n 'Raw passwords are not stored, so there is no way to see this '\n 'userโ€™s password, but you can change the password using '\n '<a href=\"../password\">this form</a>.'\n ),\n )\n\n class Meta:\n model = Identity\n fields = ('identifier', 'password', 'is_active', 'is_staff', 'is_superuser')\n\n def clean_password(self):\n # Regardless of what the user provides, return the initial value.\n # This is 
done here, rather than on the field, because the\n # field does not have access to the initial value\n return self.initial['password']\n\n\nclass IdentityAdmin(BaseUserAdmin):\n # The forms to add and change user instances\n form = IdentityChangeForm\n add_form = IdentityCreationForm\n change_password_form = AdminPasswordChangeForm\n\n # The fields to be used in displaying the User model.\n # These override the definitions on the base UserAdmin\n # that reference specific fields on auth.User.\n list_display = ('identifier', 'is_active', 'is_staff', 'is_superuser')\n list_filter = ('is_superuser',)\n fieldsets = (\n (None, {'fields': ('identifier', 'password')}),\n ('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser')}),\n )\n # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin\n # overrides get_fieldsets to use this attribute when creating a user.\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('identifier', 'password1', 'password2'),\n }),\n )\n search_fields = ('identifier',)\n ordering = ('identifier',)\n filter_horizontal = ()\n\n\nadmin.site.register(Identity, IdentityAdmin)\n" } ]
3
noamz/slack-delete
https://github.com/noamz/slack-delete
160798f49933b0f50b4cfae9d7dbb3f33bd2f196
cc900deb3bc1d8e00145af2ce1b3dd56abefab81
859195efb9b8dac1a13a65ccc18958705af5f1fe
refs/heads/master
2022-04-13T05:34:16.201977
2020-04-13T15:51:58
2020-04-13T15:51:58
255,362,322
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5059464573860168, "alphanum_fraction": 0.5104063153266907, "avg_line_length": 32.63333511352539, "blob_id": "e4c72092d4daee56d1778c38c80609e8a35975cc", "content_id": "4c1855de9fedcb138b104dacce6ad0870efae756", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "permissive", "max_line_length": 77, "num_lines": 60, "path": "/slack-delete.py", "repo_name": "noamz/slack-delete", "src_encoding": "UTF-8", "text": "import argparse, slack\n\ndef _main(token, cname, just_list):\n client = slack.WebClient(token = token)\n\n response = client.conversations_list()\n if not response['ok']:\n print('Error loading channels: ' + response['error'])\n return(1)\n\n chans = [c for c in response['channels'] if c['name'] == cname]\n if len(chans) < 1:\n print('No channel named ' + cname + ' in workspace')\n return(1)\n \n cid = chans[0]['id']\n\n print('Listing messages in #' + cname + ':')\n hist = client.channels_history(channel=cid)\n msgs = hist['messages']\n msgs.reverse()\n for m in msgs:\n is_pinned = '[pinned]' if 'pinned_to' in m else ''\n print(m['ts'] + is_pinned + ' ' + m['text'])\n\n if just_list:\n return(0)\n \n print('Deleting unpinned messages in #' + cname + ':')\n delete_all = False\n for m in msgs:\n if 'pinned_to' in m:\n continue\n print(m['ts'], m['text'])\n while not delete_all:\n q = input('Delete ' + m['ts'] + '? 
([n]o, [y]es, [a]ll delete) ')\n if len(q) > 0 and q[0] in ['n', 'y', 'a']:\n break\n print('Please enter n, y, or a')\n if q[0] == 'a':\n delete_all = True\n delete = delete_all or q[0] == 'y'\n\n if delete:\n print('Deleting ' + m['ts'] + '...')\n response = client.chat_delete(channel=cid, ts=m['ts'])\n if not response['ok']:\n print('Error: ' + response['error'])\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', required=True,\n help='slack channel')\n parser.add_argument('-t', required=True,\n help='slack token')\n parser.add_argument('-l', dest='just_list', action='store_const',\n const=True, default=False,\n help='just list messages (do not delete)')\n args = parser.parse_args()\n _main(args.t, args.c, args.just_list)\n" }, { "alpha_fraction": 0.6185010075569153, "alphanum_fraction": 0.7488183379173279, "avg_line_length": 36.025001525878906, "blob_id": "21a4ca22dfb312de2f12f62af4034faca5994589", "content_id": "cf4c8c22c58751cb106b40b6391f00b573f87a79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1481, "license_type": "permissive", "max_line_length": 157, "num_lines": 40, "path": "/README.md", "repo_name": "noamz/slack-delete", "src_encoding": "UTF-8", "text": "# slack-delete\n\nA simple Python script for deleting Slack messages in bulk from the command line.\n\n```\n$ python3 slack-delete.py -h\nusage: slack-delete.py [-h] -c C -t T [-l]\n\noptional arguments:\n -h, --help show this help message and exit\n -c C slack channel\n -t T slack token\n -l just list messages (do not delete)\n```\n\nTo use the script you will first have to create a [Slack token](https://api.slack.com/authentication/token-types) with at least the following permissions:\n* chat:delete\n* channels:read\n* channels:history\n\nThe easiest thing to do might be to create a [legacy token](https://api.slack.com/legacy/custom-integrations/legacy-tokens), although Slack discourages 
this.\n\nSample session:\n```\n$ python3 slack-delete.py -c ephemera -t xoxp-314159265358979 \nListing messages in #ephemera:\n1585141725.001200[pinned] This is a channel for ephemeral conversations.\n1586624934.029300 Hello, world!\n1586625043.031000 The quick brown fox jumped over the lazy dog.\nDeleting unpinned messages in #ephemera:\n1586624934.029300 Hello, world!\nDelete 1586624934.029300? ([n]o, [y]es, [a]ll delete) n\n1586625043.031000 The quick brown fox jumped over the lazy dog.\nDelete 1586625043.031000? ([n]o, [y]es, [a]ll delete) y\nDeleting 1586625043.031000...\n$ python3 slack-delete.py -c ephemera -t xoxp-314159265358979 -l\nListing messages in #ephemera:\n1585141725.001200[pinned] This is a channel for ephemeral conversations.\n1586624934.029300 Hello, world!\n```\n" } ]
2
jakubczakon/resvis
https://github.com/jakubczakon/resvis
f26ef236189e8da2586318bdb4cdf2078883848a
1cbf0a67e9bca7281feff2bb352e24b8b26285d7
8d48cdda7e2b888633018ed407f847b2969e0795
refs/heads/master
2021-01-21T09:38:53.240786
2017-05-18T07:47:51
2017-05-18T07:47:51
91,663,399
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7443609237670898, "avg_line_length": 25.600000381469727, "blob_id": "c9cc6443a59b26beb0524584184f39b46a67d873", "content_id": "7153ae69714081d96fd7c892bfd07638d5184029", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "no_license", "max_line_length": 61, "num_lines": 5, "path": "/README.md", "repo_name": "jakubczakon/resvis", "src_encoding": "UTF-8", "text": "## What is it?\nVisualize 3 class predictions and get observations on hover. \nZoom, pan and have fun!\n\n![query](resources/resvis.png)\n" }, { "alpha_fraction": 0.5058308839797974, "alphanum_fraction": 0.7062682509422302, "avg_line_length": 16.367088317871094, "blob_id": "ddffd4b2ad723853ed1219f060330d82eba358c0", "content_id": "fe90b10cafe1809f34de350a0e67dfd6cacdfcfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1372, "license_type": "no_license", "max_line_length": 41, "num_lines": 79, "path": "/requirements.txt", "repo_name": "jakubczakon/resvis", "src_encoding": "UTF-8", "text": 
"appdirs==1.4.3\nbackports-abc==0.5\nbackports.shutil-get-terminal-size==1.0.0\nbackports.ssl-match-hostname==3.4.0.2\nbleach==2.0.0\nbokeh==0.12.4\ncertifi==2017.4.17\nconfigparser==3.5.0\ncycler==0.10.0\ndecorator==4.0.11\ndnspython==1.15.0\nentrypoints==0.2.2\nenum34==1.1.6\nfuncsigs==1.0.2\nfunctools32==3.2.3.post2\nfutures==3.0.5\nh5py==2.6.0\nhtml5lib==0.999999999\nipaddr==2.1.11\nipwhois==0.15.1\nipykernel==4.6.1\nipython==5.3.0\nipython-genutils==0.2.0\nipywidgets==5.0.0\nJinja2==2.9.6\njsonschema==2.6.0\njupyter==1.0.0\njupyter-client==5.0.1\njupyter-console==5.1.0\njupyter-core==4.3.0\nKeras==1.2.2\nlime==0.1.1.19\nMarkupSafe==1.0\nmatplotlib==2.0.0\nmistune==0.7.4\nmock==2.0.0\nnbconvert==5.1.1\nnbformat==4.3.0\nnltk==3.2.2\nnotebook==5.0.0\nnumpy==1.12.0\npackaging==16.8\npandas==0.19.2\npandocfilters==1.4.1\npathlib2==2.2.1\npbr==2.0.0\npexpect==4.2.1\npickleshare==0.7.4\nprompt-toolkit==1.0.14\nprotobuf==3.2.0\nptyprocess==0.5.1\nPygments==2.2.0\npyparsing==2.2.0\npython-dateutil==2.6.0\npytz==2016.10\npywhois==0.1\nPyYAML==3.12\npyzmq==16.0.2\nqtconsole==4.3.0\nrequests==2.13.0\nscandir==1.5\nscikit-learn==0.18.1\nscipy==0.18.1\nseaborn==0.7.1\nsimplegeneric==0.8.1\nsingledispatch==3.4.0.3\nsix==1.10.0\nsubprocess32==3.2.7\ntensorflow==0.12.1\nterminado==0.6\ntestpath==0.3\nTheano==0.8.2\ntornado==4.5.1\ntraitlets==4.3.2\nwcwidth==0.1.7\nwebencodings==0.5.1\nwidgetsnbextension==2.0.0\nxgboost==0.6a2\nxlrd==1.0.0\n" }, { "alpha_fraction": 0.5414012670516968, "alphanum_fraction": 0.5657209157943726, "avg_line_length": 26, "blob_id": "4a757d37221030d758c43d34c4e88396a561ea6b", "content_id": "8cb8ba49cb49007a222cd529c7a446d3bccadc9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "no_license", "max_line_length": 101, "num_lines": 64, "path": "/vis.py", "repo_name": "jakubczakon/resvis", "src_encoding": "UTF-8", "text": "import glob,os\n\nimport pandas as pd\n\nfrom bokeh.io 
import output_notebook\nfrom bokeh.plotting import figure, show, ColumnDataSource\nfrom bokeh.models import HoverTool, Range1d\n\nfrom ipywidgets import interact\n\n\ndef prediction_triangle_file_list(prediction_folder):\n def plot(filepath):\n df = pd.read_csv(filepath)\n prediction_triangle(df)\n return interact(plot, filepath=glob.glob('%s/*csv'%prediction_folder))\n\ndef prediction_triangle(df):\n \n df['true_label_color'] = df['true_label'].apply(label2color)\n df['true_label_text'] = df['true_label'].apply(label2text)\n \n source = ColumnDataSource(df)\n \n hover = HoverTool(\n tooltips=\"\"\"\n <div>\n <div>\n <span style=\"font-size: 17px; font-weight: bold;\">True Label: @true_label_text</span>\n </div>\n <div>\n <img\n src=\"@img_filepath\" height=\"300\" alt=\"@img_filepath\" width=\"300\"\n style=\"float: left; margin: 0px 15px 15px 0px;\"\n border=\"2\"\n ></img>\n </div>\n \n </div>\n \"\"\"\n )\n \n p = figure(plot_width=600, plot_height=600, \n toolbar_location = 'right',\n tools='pan,box_zoom,wheel_zoom,reset,resize')\n p.add_tools(hover)\n circles = p.circle('prob1', 'prob2', size=10, source=source)\n circles.glyph.fill_color = 'true_label_color'\n \n p.x_range = Range1d(0.0, 1.0)\n p.y_range = Range1d(0.0, 1.0)\n\n output_notebook()\n show(p)\n \ndef label2color(x):\n colors = ['red','green','blue','black']\n return colors[x-1]\n\ndef label2text(x):\n if x==4:\n return 'unknown'\n else:\n return x" }, { "alpha_fraction": 0.5132075548171997, "alphanum_fraction": 0.5452830195426941, "avg_line_length": 34.400001525878906, "blob_id": "064e2ef5e64fbe22af64866a5c4b0e2816362650", "content_id": "0e11291e65264f2a009ac3f0fa826c517aec590e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 530, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/utils.py", "repo_name": "jakubczakon/resvis", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ndef 
mock_df(n, img_filepaths):\n img_filepaths = np.random.choice(img_filepaths, n)\n prob1 = np.random.random(n)\n prob2 = [np.random.random()*(1-p) for p in prob1]\n true_label = np.random.choice([1,2,3], n)\n df = pd.DataFrame({'img_filepath': img_filepaths,\n 'true_label':true_label,\n 'prob1': prob1,\n 'prob2': prob2})\n df['prob3'] = df['prob1'] + df['prob2']\n df['prob3'] = 1 - df['prob3']\n return df" } ]
4
soulwblood/Bulbasaur-bot
https://github.com/soulwblood/Bulbasaur-bot
50eba97f2cd4e927ce9c75539be85ba49fd5f008
2d9b1f196b546d3b7a7ca0dc90fe3af0d62bcbf5
f73762c1852f81d986c7283bec4722e5b0bc6a5d
refs/heads/master
2021-05-06T08:08:59.392635
2017-12-08T23:01:01
2017-12-08T23:01:01
113,502,168
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5260534286499023, "alphanum_fraction": 0.5311408042907715, "avg_line_length": 39.54166793823242, "blob_id": "0e2bd268497af7341693a3adf276cbb7aa356b4c", "content_id": "7b89d0161d0d21c19c1bd529af1234f565243e35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19460, "license_type": "permissive", "max_line_length": 79, "num_lines": 480, "path": "/guildwars2/characters.py", "repo_name": "soulwblood/Bulbasaur-bot", "src_encoding": "UTF-8", "text": "import collections\nimport datetime\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\n\nfrom .exceptions import APIError, APINotFound\n\n\nclass CharactersMixin:\n @commands.group()\n async def character(self, ctx):\n \"\"\"Character related commands\"\"\"\n if ctx.invoked_subcommand is None:\n await self.bot.send_cmd_help(ctx)\n\n @character.command(name=\"info\")\n @commands.cooldown(1, 5, BucketType.user)\n async def character_info(self, ctx, *, character: str):\n \"\"\"Info about the given character\n You must be the owner of the character.\n\n Required permissions: characters\n \"\"\"\n\n def format_age(age):\n hours, remainder = divmod(int(age), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n if days:\n fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'\n else:\n fmt = '{h} hours, {m} minutes, and {s} seconds'\n return fmt.format(d=days, h=hours, m=minutes, s=seconds)\n\n await ctx.trigger_typing()\n character = character.title()\n endpoint = \"characters/\" + character.replace(\" \", \"%20\")\n try:\n results = await self.call_api(endpoint, ctx.author, [\"characters\"])\n except APINotFound:\n return await ctx.send(\"Invalid character name\")\n except APIError as e:\n return await self.error_handler(ctx, e)\n age = format_age(results[\"age\"])\n created = results[\"created\"].split(\"T\", 1)[0]\n deaths = results[\"deaths\"]\n 
deathsperhour = round(deaths / (results[\"age\"] / 3600), 1)\n if \"title\" in results:\n title = await self.get_title(results[\"title\"])\n else:\n title = None\n profession = await self.get_profession(results)\n gender = results[\"gender\"]\n race = results[\"race\"].lower()\n guild = results[\"guild\"]\n data = discord.Embed(description=title, colour=profession.color)\n data.set_thumbnail(url=profession.icon)\n data.add_field(name=\"Created at\", value=created)\n data.add_field(name=\"Played for\", value=age)\n if guild is not None:\n endpoint = \"guild/{0}\".format(results[\"guild\"])\n try:\n guild = await self.call_api(endpoint)\n except APIError as e:\n return await self.error_handler(ctx, e)\n gname = guild[\"name\"]\n gtag = guild[\"tag\"]\n data.add_field(name=\"Guild\", value=\"[{}] {}\".format(gtag, gname))\n data.add_field(name=\"Deaths\", value=deaths)\n data.add_field(\n name=\"Deaths per hour\", value=str(deathsperhour), inline=False)\n craft_list = self.get_crafting(results)\n if craft_list:\n data.add_field(name=\"Crafting\", value=\"\\n\".join(craft_list))\n data.set_author(name=character)\n data.set_footer(text=\"A {} {} {}\".format(gender.lower(), race,\n profession.name.lower()))\n try:\n await ctx.send(embed=data)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")\n\n @character.command(name=\"list\")\n @commands.cooldown(1, 15, BucketType.user)\n async def character_list(self, ctx):\n \"\"\"Lists all your characters\n\n Required permissions: characters\n \"\"\"\n user = ctx.author\n scopes = [\"characters\"]\n endpoint = \"characters?page=0\"\n await ctx.trigger_typing()\n try:\n results = await self.call_api(endpoint, user, scopes)\n except APIError as e:\n return await self.error_handler(ctx, e)\n output = [\"{.mention}, your characters: ```\"]\n for character in results:\n profession = await self.get_profession(character)\n output.append(\"{} ({})\".format(character[\"name\"], profession.name))\n 
output.append(\"```\")\n await ctx.send(\"\\n\".join(output).format(user))\n\n @character.command(name=\"gear\")\n @commands.cooldown(1, 10, BucketType.user)\n async def character_gear(self, ctx, *, character: str):\n \"\"\"Displays the gear of given character\n You must be the owner of the character.\n\n Required permissions: characters\n \"\"\"\n\n def handle_duplicates(upgrades):\n formatted_list = []\n for x in upgrades:\n if upgrades.count(x) != 1:\n formatted_list.append(x + \" x\" + str(upgrades.count(x)))\n upgrades[:] = [i for i in upgrades if i != x]\n else:\n formatted_list.append(x)\n return formatted_list\n\n character = character.title()\n await ctx.trigger_typing()\n try:\n results = await self.get_character(ctx, character)\n except APINotFound:\n return await ctx.send(\"Invalid character name\")\n except APIError as e:\n return await self.error_handler(ctx, e)\n eq = results[\"equipment\"]\n gear = {}\n pieces = [\n \"Helm\", \"Shoulders\", \"Coat\", \"Gloves\", \"Leggings\", \"Boots\",\n \"Ring1\", \"Ring2\", \"Amulet\", \"Accessory1\", \"Accessory2\", \"Backpack\",\n \"WeaponA1\", \"WeaponA2\", \"WeaponB1\", \"WeaponB2\"\n ]\n for piece in pieces:\n gear[piece] = {\n \"id\": None,\n \"upgrades\": [],\n \"infusions\": [],\n \"stat\": None,\n \"name\": None\n }\n for item in eq:\n for piece in pieces:\n if item[\"slot\"] == piece:\n gear[piece][\"id\"] = item[\"id\"]\n c = await self.fetch_item(item[\"id\"])\n gear[piece][\"name\"] = c[\"name\"]\n if \"upgrades\" in item:\n for u in item[\"upgrades\"]:\n upgrade = await self.db.items.find_one({\"_id\": u})\n gear[piece][\"upgrades\"].append(upgrade[\"name\"])\n if \"infusions\" in item:\n for u in item[\"infusions\"]:\n infusion = await self.db.items.find_one({\"_id\": u})\n gear[piece][\"infusions\"].append(infusion[\"name\"])\n if \"stats\" in item:\n gear[piece][\"stat\"] = await self.fetch_statname(\n item[\"stats\"][\"id\"])\n else:\n thing = await self.db.items.find_one({\n \"_id\": 
item[\"id\"]\n })\n try:\n statid = thing[\"details\"][\"infix_upgrade\"][\"id\"]\n gear[piece][\"stat\"] = await self.fetch_statname(\n statid)\n except:\n gear[piece][\"stat\"] = \"\"\n profession = await self.get_profession(results)\n level = results[\"level\"]\n data = discord.Embed(description=\"Gear\", colour=profession.color)\n for piece in pieces:\n if gear[piece][\"id\"] is not None:\n statname = gear[piece][\"stat\"]\n itemname = gear[piece][\"name\"]\n upgrade = handle_duplicates(gear[piece][\"upgrades\"])\n infusion = handle_duplicates(gear[piece][\"infusions\"])\n msg = \"\\n\".join(upgrade + infusion)\n if not msg:\n msg = u'\\u200b'\n data.add_field(\n name=\"{} {} [{}]\".format(statname, itemname, piece),\n value=msg,\n inline=False)\n data.set_author(name=character)\n data.set_footer(\n text=\"A level {} {} \".format(level, profession.name.lower()),\n icon_url=profession.icon)\n try:\n await ctx.send(embed=data)\n except discord.Forbidden as e:\n await ctx.send(\"Need permission to embed links\")\n\n @character.command(name=\"birthdays\")\n async def character_birthdays(self, ctx):\n \"\"\"Lists days until the next birthday for each of your characters.\n\n Required permissions: characters\n \"\"\"\n user = ctx.message.author\n endpoint = \"characters?page=0\"\n await ctx.trigger_typing()\n try:\n results = await self.call_api(endpoint, user, [\"characters\"])\n except APIError as e:\n return await self.error_handler(ctx, e)\n charlist = []\n for character in results:\n created = character[\"created\"].split(\"T\", 1)[0]\n dt = datetime.datetime.strptime(created, \"%Y-%m-%d\")\n age = datetime.datetime.utcnow() - dt\n days = age.days\n years = days / 365\n floor = int(days / 365)\n daystill = 365 - (days -\n (365 * floor)) # finds days till next birthday\n charlist.append(character[\"name\"] + \" \" + str(floor + 1) + \" \" +\n str(daystill))\n sortedlist = sorted(charlist, key=lambda v: int(v.rsplit(' ', 1)[1]))\n output = \"{.mention}, days until 
each of your characters birthdays:```\"\n for character in sortedlist:\n name = character.rsplit(' ', 2)[0]\n days = character.rsplit(' ', 1)[1]\n years = character.rsplit(' ', 2)[1]\n if years == \"1\":\n suffix = 'st'\n elif years == \"2\":\n suffix = 'nd'\n elif years == \"3\":\n suffix = 'rd'\n else:\n suffix = 'th'\n output += \"\\n{} {} days until {}{} birthday\".format(\n name, days, years, suffix)\n if len(output) > 1900 and '*' not in output:\n output += '*'\n output += \"```\"\n if '*' not in output:\n await ctx.send(output.format(user))\n else:\n first, second = output.split('*')\n first += \"```\"\n second = \"```\" + second\n await ctx.send(first.format(user))\n await ctx.send(second)\n\n @character.command(name=\"build\", aliases=[\"pvebuild\"])\n @commands.cooldown(1, 10, BucketType.user)\n async def character_build(self, ctx, *, character: str):\n \"\"\"Displays the build of given character\n You must be the owner of the character.\n\n Required permissions: characters\n \"\"\"\n character = character.title()\n await ctx.trigger_typing()\n try:\n results = await self.get_character(ctx, character)\n except APINotFound:\n return await ctx.send(\"Invalid character name\")\n except APIError as e:\n return await self.error_handler(ctx, e)\n embed = await self.build_embed(results, \"pve\")\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")\n\n @character.command(name=\"pvpbuild\")\n @commands.cooldown(1, 10, BucketType.user)\n async def character_pvpbuild(self, ctx, *, character: str):\n \"\"\"Displays the build of given character\n You must be the owner of the character.\n\n Required permissions: characters\n \"\"\"\n character = character.title()\n await ctx.trigger_typing()\n try:\n results = await self.get_character(ctx, character)\n except APINotFound:\n return await ctx.send(\"Invalid character name\")\n except APIError as e:\n return await self.error_handler(ctx, e)\n embed = await 
self.build_embed(results, \"pvp\")\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")\n\n @character.command(name=\"wvwbuild\")\n @commands.cooldown(1, 10, BucketType.user)\n async def character_wvwbuild(self, ctx, *, character: str):\n \"\"\"Displays the build of given character\n You must be the owner of the character.\n\n Required permissions: characters\n \"\"\"\n character = character.title()\n await ctx.trigger_typing()\n try:\n results = await self.get_character(ctx, character)\n except APINotFound:\n return await ctx.send(\"Invalid character name\")\n except APIError as e:\n return await self.error_handler(ctx, e)\n embed = await self.build_embed(results, \"wvw\")\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")\n\n async def build_embed(self, results, mode):\n profession = await self.get_profession(results, mode=mode)\n level = results[\"level\"]\n specializations = results[\"specializations\"][mode]\n embed = discord.Embed(\n title=\"{} build\".format(mode.upper()), color=profession.color)\n embed.set_author(name=results[\"name\"])\n for spec in specializations:\n if spec is None:\n continue\n spec_doc = await self.db.specializations.find_one({\n \"_id\": spec[\"id\"]\n })\n spec_name = spec_doc[\"name\"]\n traits = []\n for trait in spec[\"traits\"]:\n if trait is None:\n continue\n trait_doc = await self.db.traits.find_one({\"_id\": trait})\n tier = trait_doc[\"tier\"] - 1\n trait_index = spec_doc[\"major_traits\"].index(trait)\n trait_index = 1 + trait_index - tier * 3\n traits.append(\"{} ({})\".format(trait_doc[\"name\"], trait_index))\n if traits:\n embed.add_field(\n name=spec_name, value=\"\\n\".join(traits), inline=False)\n embed.set_footer(\n text=\"A level {} {} \".format(level, profession.name.lower()),\n icon_url=profession.icon)\n return embed\n\n @character.command(name=\"togglepublic\")\n 
@commands.cooldown(1, 1, BucketType.user)\n async def character_togglepublic(self, ctx, *, character_or_all: str):\n \"\"\"Toggle your character's (or all of them) status to public\n\n Public characters can have their gear and build checked by anyone.\n The rest is still private.\n\n Required permissions: characters\n \"\"\"\n character = character_or_all.title()\n user = ctx.author\n await ctx.trigger_typing()\n try:\n key = await self.fetch_key(user, [\"characters\"])\n results = await self.call_api(\"characters\", key=key[\"key\"])\n except APIError as e:\n return await self.error_handler(ctx, e)\n if character not in results and character != \"All\":\n return await ctx.send(\"Invalid character name\")\n characters = [character] if character != \"All\" else results\n output = []\n for char in characters:\n doc = await self.db.characters.find_one({\"name\": char})\n if doc:\n await self.db.characters.delete_one({\"name\": char})\n output.append(char + \" is now private\")\n else:\n await self.db.characters.insert_one({\n \"name\":\n char,\n \"owner\":\n user.id,\n \"owner_acc_name\":\n key[\"account_name\"]\n })\n output.append(char + \" is now public\")\n await ctx.send(\"Character status successfully changed. Anyone can \"\n \"check public characters gear and build - the rest is \"\n \"still private. 
To make character private \"\n \"again, type the same command.\")\n if character == \"All\":\n await user.send(\"\\n\".join(output))\n\n @character.command(name=\"crafting\")\n @commands.cooldown(1, 10, BucketType.user)\n async def character_crafting(self, ctx):\n \"\"\"Displays your characters and their crafting level\"\"\"\n endpoint = \"characters?page=0\"\n await ctx.trigger_typing()\n try:\n doc = await self.fetch_key(ctx.author, [\"characters\"])\n characters = await self.call_api(endpoint, key=doc[\"key\"])\n except APIError as e:\n return await self.error_handler(ctx, e)\n data = discord.Embed(\n description='Crafting overview', colour=self.embed_color)\n data.set_author(\n name=doc[\"account_name\"], icon_url=ctx.author.avatar_url)\n counter = 0\n for character in characters:\n if counter == 25:\n break\n craft_list = self.get_crafting(character)\n if craft_list:\n data.add_field(\n name=character[\"name\"], value=\"\\n\".join(craft_list))\n counter += 1\n try:\n await ctx.send(embed=data)\n except discord.HTTPException:\n await ctx.send(\"Need permission to embed links\")\n\n async def get_character(self, ctx, character):\n character = character.title()\n endpoint = \"characters/\" + character.replace(\" \", \"%20\")\n try:\n results = await self.call_api(endpoint, ctx.author,\n [\"characters\", \"builds\"])\n except APINotFound:\n results = await self.get_public_character(character)\n if not results:\n raise APINotFound\n return results\n\n async def get_public_character(self, character):\n character = character.title()\n endpoint = \"characters/\" + character.replace(\" \", \"%20\")\n doc = await self.db.characters.find_one({\"name\": character})\n if doc:\n user = await self.bot.get_user_info(doc[\"owner\"])\n try:\n return await self.call_api(endpoint, user)\n except:\n return None\n return None\n\n def get_crafting(self, character):\n craft_list = []\n for crafting in character[\"crafting\"]:\n rating = crafting[\"rating\"]\n discipline = 
crafting[\"discipline\"]\n craft_list.append(\"Level {} {}\".format(rating, discipline))\n return craft_list\n\n async def get_profession(self, character, *, mode=\"pve\"):\n async def get_elite_spec(character):\n spec = character[\"specializations\"][mode][2]\n if spec:\n spec = await self.db.specializations.find_one({\n \"_id\": spec[\"id\"]\n })\n if spec is None or not spec[\"elite\"]:\n return None\n return spec[\"name\"]\n return None\n\n def get_icon_url(prof_name):\n base_url = (\"https://api.gw2bot.info/\"\n \"resources/professions/{}_icon.png\")\n return base_url.format(prof_name.replace(\" \", \"_\").lower())\n\n Profession = collections.namedtuple(\"Profession\",\n [\"name\", \"icon\", \"color\"])\n color = discord.Color(\n int(self.gamedata[\"professions\"][character[\"profession\"]\n .lower()][\"color\"], 0))\n name = await get_elite_spec(character) or character[\"profession\"]\n icon = get_icon_url(name)\n return Profession(name, icon, color)\n" } ]
1